mirror of https://github.com/citusdata/citus.git
Compare commits
58 Commits
2a17fdbb88  cdf8a123c6  46bc57a99a  1a21c524b7  be3da3901e  6348efca64
0e147f9c8c  281460fbaa  3065966d13  d0000a15bd  a5adc49077  7233c27533
51f422f3c6  8fae9aae96  59774b1dd4  221aa1a381  7e99324bd9  447c7ecdd4
a943696c44  ca2bbd89b6  3d2e1a7464  b7b7b66beb  f6e5006dfd  03e4bec352
6aac62e847  72c54b5cdd  637d93e8ff  7a00c5b83c  51b7b01a09  993a402c73
39e63f5a08  2271e9ded1  4c90dbbd88  388893ce5e  4b98f6c5c2  97dda868a0
27ef768f36  c238e6c8b0  a04e7b233e  0bd4002e5f  23f24a9668  d77e386e92
b7b960955c  49f130fcd3  b8c0e5ef1e  55eed7f2ec  49d23229c4  48fab6f264
9a4fddc9c5  1d54b8f301  5e648e1a78  fc711af85b  21ca434bef  61ab7006d0
3de2d2868d  77b4534c72  4b493f088b  06c878b348
@@ -287,7 +287,12 @@ workflows:
  version: 2
  build_and_test:
    jobs:
      - check-merge-to-enterprise
      - check-merge-to-enterprise:
          filters:
            branches:
              ignore:
                - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.*

      - build
      - check-style
      - check-sql-snapshots

CHANGELOG.md (105 changed lines)

@@ -1,3 +1,108 @@
### citus v9.4.5 (July 7, 2021) ###

* Adds a configure flag to enforce security

* Avoids re-using connections for intermediate results

* Fixes a bug that causes pruning incorrect shard of a range distributed table

* Fixes a bug that might cause self-deadlocks when COPY used in TX block

* Fixes an issue that could cause citus_finish_pg_upgrade to fail

### citus v9.4.4 (December 28, 2020) ###

* Fixes a bug that could cause router queries with local tables to be pushed
  down

* Fixes a segfault in connection management due to invalid connection hash
  entries

* Fixes possible issues that might occur with single shard distributed tables

### citus v9.4.3 (November 24, 2020) ###

* Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE

* Fixes a bug that triggers subplan executions unnecessarily with cursors

### citus v9.4.2 (October 21, 2020) ###

* Fixes a bug that could lead to multiple maintenance daemons

* Fixes an issue preventing views in reference table modifications

### citus v9.4.1 (September 30, 2020) ###

* Fixes EXPLAIN ANALYZE output truncation

* Fixes a deadlock during transaction recovery

### citus v9.4.0 (July 28, 2020) ###

* Improves COPY by honoring max_adaptive_executor_pool_size config

* Adds support for insert into local table select from distributed table

* Adds support to partially push down tdigest aggregates

* Adds support for receiving binary encoded results from workers using
  citus.enable_binary_protocol

* Enables joins between local tables and CTEs

* Adds showing query text in EXPLAIN output when explain verbose is true

* Adds support for showing CTE statistics in EXPLAIN ANALYZE

* Adds support for showing amount of data received in EXPLAIN ANALYZE

* Introduces downgrade paths in migration scripts

* Avoids returning incorrect results when changing roles in a transaction

* Fixes `ALTER TABLE IF EXISTS SET SCHEMA` with non-existing table bug

* Fixes `CREATE INDEX CONCURRENTLY` with no index name on a postgres table bug

* Fixes a bug that could cause crashes with certain compile flags

* Fixes a bug with lists of configuration values in ALTER ROLE SET statements

* Fixes a bug that occurs when coordinator is added as a worker node

* Fixes a crash because of overflow in partition id with certain compile flags

* Fixes a crash that may happen if no worker nodes are added

* Fixes a crash that occurs when inserting implicitly coerced constants

* Fixes a crash when aggregating empty tables

* Fixes a memory leak in subtransaction memory handling

* Fixes crash when using rollback to savepoint after cancellation of DML

* Fixes deparsing for queries with anonymous column references

* Fixes distribution of composite types failing to include typemods

* Fixes explain analyze on adaptive executor repartitions

* Fixes possible error throwing in abort handle

* Fixes segfault when evaluating func calls with default params on coordinator

* Fixes several EXPLAIN ANALYZE issues

* Fixes write queries with const expressions and COLLATE in various places

* Fixes wrong cancellation message about distributed deadlocks

* Reports correct INSERT/SELECT method in EXPLAIN

* Disallows triggers on citus tables

### citus v9.3.2 (Jun 22, 2020) ###

* Fixes a version bump issue in 9.3.1
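Several of the v9.4.0 entries above concern richer EXPLAIN ANALYZE output (the query text when VERBOSE is set, CTE statistics, and the amount of data received from workers). A minimal sketch of the kind of statement those entries describe; the table and column names are invented for illustration:

```sql
-- Hypothetical distributed table "page_views"; the EXPLAIN options themselves
-- are stock PostgreSQL syntax.
EXPLAIN (ANALYZE, VERBOSE)
WITH recent AS (
    SELECT page_id
    FROM page_views
    WHERE view_time > now() - interval '1 day'
)
SELECT page_id, count(*)
FROM recent
GROUP BY page_id;
```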
Makefile (8 changed lines)

@@ -24,6 +24,7 @@ install-headers: extension
	$(INSTALL_DATA) $(citus_top_builddir)/src/include/citus_version.h '$(DESTDIR)$(includedir_server)/'
	# the rest in the source tree
	$(INSTALL_DATA) $(citus_abs_srcdir)/src/include/distributed/*.h '$(DESTDIR)$(includedir_server)/distributed/'

clean-extension:
	$(MAKE) -C src/backend/distributed/ clean
clean-full:

@@ -31,6 +32,11 @@ clean-full:
.PHONY: extension install-extension clean-extension clean-full
# Add to generic targets
install: install-extension install-headers
install-downgrades:
	$(MAKE) -C src/backend/distributed/ install-downgrades
install-all: install-headers
	$(MAKE) -C src/backend/distributed/ install-all

clean: clean-extension

# apply or check style

@@ -44,4 +50,4 @@ check-style:
check: all install
	$(MAKE) -C src/test/regress check-full

.PHONY: all check install clean
.PHONY: all check clean install install-downgrades install-all


@@ -86,6 +86,7 @@ endif

# Add options passed to configure or computed therein, to CFLAGS/CPPFLAGS/...
override CFLAGS += @CFLAGS@ @CITUS_CFLAGS@
override BITCODE_CFLAGS := $(BITCODE_CFLAGS) @CITUS_BITCODE_CFLAGS@
ifneq ($(GIT_VERSION),)
override CFLAGS += -DGIT_VERSION=\"$(GIT_VERSION)\"
endif

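The new `install-downgrades` and `install-all` targets above wire the generated downgrade scripts into the ordinary install flow. A sketch of invoking them from a configured build tree; the command sequence is illustrative and not part of the diff:

```sh
make                      # build the extension
make install              # install-extension + install-headers, as before
make install-downgrades   # only the generated downgrade SQL scripts
make install-all          # everything above, including the downgrade scripts
```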
@ -1,6 +1,6 @@
|
|||
#! /bin/sh
|
||||
# Guess values for system-dependent variables and create Makefiles.
|
||||
# Generated by GNU Autoconf 2.69 for Citus 9.4devel.
|
||||
# Generated by GNU Autoconf 2.69 for Citus 9.4.5.
|
||||
#
|
||||
#
|
||||
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
||||
|
@ -579,8 +579,8 @@ MAKEFLAGS=
|
|||
# Identity of this package.
|
||||
PACKAGE_NAME='Citus'
|
||||
PACKAGE_TARNAME='citus'
|
||||
PACKAGE_VERSION='9.4devel'
|
||||
PACKAGE_STRING='Citus 9.4devel'
|
||||
PACKAGE_VERSION='9.4.5'
|
||||
PACKAGE_STRING='Citus 9.4.5'
|
||||
PACKAGE_BUGREPORT=''
|
||||
PACKAGE_URL=''
|
||||
|
||||
|
@ -627,8 +627,10 @@ POSTGRES_BUILDDIR
|
|||
POSTGRES_SRCDIR
|
||||
CITUS_LDFLAGS
|
||||
CITUS_CPPFLAGS
|
||||
CITUS_BITCODE_CFLAGS
|
||||
CITUS_CFLAGS
|
||||
GIT_BIN
|
||||
with_security_flags
|
||||
EGREP
|
||||
GREP
|
||||
CPP
|
||||
|
@ -664,6 +666,7 @@ infodir
|
|||
docdir
|
||||
oldincludedir
|
||||
includedir
|
||||
runstatedir
|
||||
localstatedir
|
||||
sharedstatedir
|
||||
sysconfdir
|
||||
|
@ -690,6 +693,7 @@ with_extra_version
|
|||
enable_coverage
|
||||
with_libcurl
|
||||
with_reports_hostname
|
||||
with_security_flags
|
||||
'
|
||||
ac_precious_vars='build_alias
|
||||
host_alias
|
||||
|
@ -740,6 +744,7 @@ datadir='${datarootdir}'
|
|||
sysconfdir='${prefix}/etc'
|
||||
sharedstatedir='${prefix}/com'
|
||||
localstatedir='${prefix}/var'
|
||||
runstatedir='${localstatedir}/run'
|
||||
includedir='${prefix}/include'
|
||||
oldincludedir='/usr/include'
|
||||
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
|
||||
|
@ -992,6 +997,15 @@ do
|
|||
| -silent | --silent | --silen | --sile | --sil)
|
||||
silent=yes ;;
|
||||
|
||||
-runstatedir | --runstatedir | --runstatedi | --runstated \
|
||||
| --runstate | --runstat | --runsta | --runst | --runs \
|
||||
| --run | --ru | --r)
|
||||
ac_prev=runstatedir ;;
|
||||
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
|
||||
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
|
||||
| --run=* | --ru=* | --r=*)
|
||||
runstatedir=$ac_optarg ;;
|
||||
|
||||
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
|
||||
ac_prev=sbindir ;;
|
||||
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
|
||||
|
@ -1129,7 +1143,7 @@ fi
|
|||
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
|
||||
datadir sysconfdir sharedstatedir localstatedir includedir \
|
||||
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
|
||||
libdir localedir mandir
|
||||
libdir localedir mandir runstatedir
|
||||
do
|
||||
eval ac_val=\$$ac_var
|
||||
# Remove trailing slashes.
|
||||
|
@ -1242,7 +1256,7 @@ if test "$ac_init_help" = "long"; then
|
|||
# Omit some internal or obsolete options to make the list less imposing.
|
||||
# This message is too long to be a string in the A/UX 3.1 sh.
|
||||
cat <<_ACEOF
|
||||
\`configure' configures Citus 9.4devel to adapt to many kinds of systems.
|
||||
\`configure' configures Citus 9.4.5 to adapt to many kinds of systems.
|
||||
|
||||
Usage: $0 [OPTION]... [VAR=VALUE]...
|
||||
|
||||
|
@ -1282,6 +1296,7 @@ Fine tuning of the installation directories:
|
|||
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
|
||||
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
|
||||
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
|
||||
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
|
||||
--libdir=DIR object code libraries [EPREFIX/lib]
|
||||
--includedir=DIR C header files [PREFIX/include]
|
||||
--oldincludedir=DIR C header files for non-gcc [/usr/include]
|
||||
|
@ -1303,7 +1318,7 @@ fi
|
|||
|
||||
if test -n "$ac_init_help"; then
|
||||
case $ac_init_help in
|
||||
short | recursive ) echo "Configuration of Citus 9.4devel:";;
|
||||
short | recursive ) echo "Configuration of Citus 9.4.5:";;
|
||||
esac
|
||||
cat <<\_ACEOF
|
||||
|
||||
|
@ -1323,6 +1338,7 @@ Optional Packages:
|
|||
--with-reports-hostname=HOSTNAME
|
||||
Use HOSTNAME as hostname for statistics collection
|
||||
and update checks
|
||||
--with-security-flags use security flags
|
||||
|
||||
Some influential environment variables:
|
||||
PG_CONFIG Location to find pg_config for target PostgreSQL instalation
|
||||
|
@ -1403,7 +1419,7 @@ fi
|
|||
test -n "$ac_init_help" && exit $ac_status
|
||||
if $ac_init_version; then
|
||||
cat <<\_ACEOF
|
||||
Citus configure 9.4devel
|
||||
Citus configure 9.4.5
|
||||
generated by GNU Autoconf 2.69
|
||||
|
||||
Copyright (C) 2012 Free Software Foundation, Inc.
|
||||
|
@ -1886,7 +1902,7 @@ cat >config.log <<_ACEOF
|
|||
This file contains any messages produced by compilers while
|
||||
running configure, to aid debugging if configure makes a mistake.
|
||||
|
||||
It was created by Citus $as_me 9.4devel, which was
|
||||
It was created by Citus $as_me 9.4.5, which was
|
||||
generated by GNU Autoconf 2.69. Invocation command line was
|
||||
|
||||
$ $0 $@
|
||||
|
@ -4327,6 +4343,48 @@ if test x"$citusac_cv_prog_cc_cflags__Werror_return_type" = x"yes"; then
|
|||
CITUS_CFLAGS="$CITUS_CFLAGS -Werror=return-type"
|
||||
fi
|
||||
|
||||
# Security flags
|
||||
# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide
|
||||
# We do not enforce the following flag because it is only available on GCC>=8
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -fstack-clash-protection" >&5
|
||||
$as_echo_n "checking whether $CC supports -fstack-clash-protection... " >&6; }
|
||||
if ${citusac_cv_prog_cc_cflags__fstack_clash_protection+:} false; then :
|
||||
$as_echo_n "(cached) " >&6
|
||||
else
|
||||
citusac_save_CFLAGS=$CFLAGS
|
||||
flag=-fstack-clash-protection
|
||||
case $flag in -Wno*)
|
||||
flag=-W$(echo $flag | cut -c 6-)
|
||||
esac
|
||||
CFLAGS="$citusac_save_CFLAGS $flag"
|
||||
ac_save_c_werror_flag=$ac_c_werror_flag
|
||||
ac_c_werror_flag=yes
|
||||
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
|
||||
/* end confdefs.h. */
|
||||
|
||||
int
|
||||
main ()
|
||||
{
|
||||
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
_ACEOF
|
||||
if ac_fn_c_try_compile "$LINENO"; then :
|
||||
citusac_cv_prog_cc_cflags__fstack_clash_protection=yes
|
||||
else
|
||||
citusac_cv_prog_cc_cflags__fstack_clash_protection=no
|
||||
fi
|
||||
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
|
||||
ac_c_werror_flag=$ac_save_c_werror_flag
|
||||
CFLAGS="$citusac_save_CFLAGS"
|
||||
fi
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__fstack_clash_protection" >&5
|
||||
$as_echo "$citusac_cv_prog_cc_cflags__fstack_clash_protection" >&6; }
|
||||
if test x"$citusac_cv_prog_cc_cflags__fstack_clash_protection" = x"yes"; then
|
||||
CITUS_CFLAGS="$CITUS_CFLAGS -fstack-clash-protection"
|
||||
fi
|
||||
|
||||
|
||||
#
|
||||
# --enable-coverage enables generation of code coverage metrics with gcov
|
||||
|
@ -4468,6 +4526,54 @@ cat >>confdefs.h <<_ACEOF
|
|||
_ACEOF
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Check whether --with-security-flags was given.
|
||||
if test "${with_security_flags+set}" = set; then :
|
||||
withval=$with_security_flags;
|
||||
case $withval in
|
||||
yes)
|
||||
:
|
||||
;;
|
||||
no)
|
||||
:
|
||||
;;
|
||||
*)
|
||||
as_fn_error $? "no argument expected for --with-security-flags option" "$LINENO" 5
|
||||
;;
|
||||
esac
|
||||
|
||||
else
|
||||
with_security_flags=no
|
||||
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
||||
if test "$with_security_flags" = yes; then
|
||||
# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide
|
||||
|
||||
# We always want to have some compiler flags for security concerns.
|
||||
SECURITY_CFLAGS="-fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -fpic -shared -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security"
|
||||
CITUS_CFLAGS="$CITUS_CFLAGS $SECURITY_CFLAGS"
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: Blindly added security flags for linker: $SECURITY_CFLAGS" >&5
|
||||
$as_echo "$as_me: Blindly added security flags for linker: $SECURITY_CFLAGS" >&6;}
|
||||
|
||||
# We always want to have some clang flags for security concerns.
|
||||
# This doesn't include "-Wl,-z,relro -Wl,-z,now" on purpuse, because bitcode is not linked.
|
||||
# This doesn't include -fsanitize=cfi because it breaks builds on many distros including
|
||||
# Debian/Buster, Debian/Stretch, Ubuntu/Bionic, Ubuntu/Xenial and EL7.
|
||||
SECURITY_BITCODE_CFLAGS="-fsanitize=safe-stack -fstack-protector-strong -flto -fPIC -Wformat -Wformat-security -Werror=format-security"
|
||||
CITUS_BITCODE_CFLAGS="$CITUS_BITCODE_CFLAGS $SECURITY_BITCODE_CFLAGS"
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: Blindly added security flags for llvm: $SECURITY_BITCODE_CFLAGS" >&5
|
||||
$as_echo "$as_me: Blindly added security flags for llvm: $SECURITY_BITCODE_CFLAGS" >&6;}
|
||||
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." >&5
|
||||
$as_echo "$as_me: WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." >&2;}
|
||||
fi
|
||||
|
||||
# Check if git is installed, when installed the gitref of the checkout will be baked in the application
|
||||
# Extract the first word of "git", so it can be a program name with args.
|
||||
set dummy git; ac_word=$2
|
||||
|
@ -4533,6 +4639,8 @@ fi
|
|||
|
||||
CITUS_CFLAGS="$CITUS_CFLAGS"
|
||||
|
||||
CITUS_BITCODE_CFLAGS="$CITUS_BITCODE_CFLAGS"
|
||||
|
||||
CITUS_CPPFLAGS="$CITUS_CPPFLAGS"
|
||||
|
||||
CITUS_LDFLAGS="$LIBS $CITUS_LDFLAGS"
|
||||
|
@ -5055,7 +5163,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
|
|||
# report actual input values of CONFIG_FILES etc. instead of their
|
||||
# values after options handling.
|
||||
ac_log="
|
||||
This file was extended by Citus $as_me 9.4devel, which was
|
||||
This file was extended by Citus $as_me 9.4.5, which was
|
||||
generated by GNU Autoconf 2.69. Invocation command line was
|
||||
|
||||
CONFIG_FILES = $CONFIG_FILES
|
||||
|
@ -5117,7 +5225,7 @@ _ACEOF
|
|||
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
||||
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
||||
ac_cs_version="\\
|
||||
Citus config.status 9.4devel
|
||||
Citus config.status 9.4.5
|
||||
configured by $0, generated by GNU Autoconf 2.69,
|
||||
with options \\"\$ac_cs_config\\"
configure.in (30 changed lines)

@@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.

AC_INIT([Citus], [9.4devel])
AC_INIT([Citus], [9.4.5])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])

# we'll need sed and awk for some of the version commands

@@ -174,6 +174,10 @@ CITUSAC_PROG_CC_CFLAGS_OPT([-Werror=vla]) # visual studio does not support thes
CITUSAC_PROG_CC_CFLAGS_OPT([-Werror=implicit-int])
CITUSAC_PROG_CC_CFLAGS_OPT([-Werror=implicit-function-declaration])
CITUSAC_PROG_CC_CFLAGS_OPT([-Werror=return-type])
# Security flags
# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide
# We do not enforce the following flag because it is only available on GCC>=8
CITUSAC_PROG_CC_CFLAGS_OPT([-fstack-clash-protection])

#
# --enable-coverage enables generation of code coverage metrics with gcov

@@ -212,11 +216,35 @@ PGAC_ARG_REQ(with, reports-hostname, [HOSTNAME],
AC_DEFINE_UNQUOTED(REPORTS_BASE_URL, "$REPORTS_BASE_URL",
                   [Base URL for statistics collection and update checks])

PGAC_ARG_BOOL(with, security-flags, no,
              [use security flags])
AC_SUBST(with_security_flags)

if test "$with_security_flags" = yes; then
# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide

# We always want to have some compiler flags for security concerns.
SECURITY_CFLAGS="-fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -fpic -shared -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security"
CITUS_CFLAGS="$CITUS_CFLAGS $SECURITY_CFLAGS"
AC_MSG_NOTICE([Blindly added security flags for linker: $SECURITY_CFLAGS])

# We always want to have some clang flags for security concerns.
# This doesn't include "-Wl,-z,relro -Wl,-z,now" on purpuse, because bitcode is not linked.
# This doesn't include -fsanitize=cfi because it breaks builds on many distros including
# Debian/Buster, Debian/Stretch, Ubuntu/Bionic, Ubuntu/Xenial and EL7.
SECURITY_BITCODE_CFLAGS="-fsanitize=safe-stack -fstack-protector-strong -flto -fPIC -Wformat -Wformat-security -Werror=format-security"
CITUS_BITCODE_CFLAGS="$CITUS_BITCODE_CFLAGS $SECURITY_BITCODE_CFLAGS"
AC_MSG_NOTICE([Blindly added security flags for llvm: $SECURITY_BITCODE_CFLAGS])

AC_MSG_WARN([If you run into issues during linking or bitcode compilation, you can use --without-security-flags.])
fi

# Check if git is installed, when installed the gitref of the checkout will be baked in the application
AC_PATH_PROG(GIT_BIN, git)
AC_CHECK_FILE(.git,[HAS_DOTGIT=yes], [HAS_DOTGIT=])

AC_SUBST(CITUS_CFLAGS, "$CITUS_CFLAGS")
AC_SUBST(CITUS_BITCODE_CFLAGS, "$CITUS_BITCODE_CFLAGS")
AC_SUBST(CITUS_CPPFLAGS, "$CITUS_CPPFLAGS")
AC_SUBST(CITUS_LDFLAGS, "$LIBS $CITUS_LDFLAGS")
AC_SUBST(POSTGRES_SRCDIR, "$POSTGRES_SRCDIR")

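The PGAC_ARG_BOOL block above is what the changelog's "Adds a configure flag to enforce security" entry refers to. A sketch of using it; the pg_config path below is an assumption for the example, not taken from the diff:

```sh
# Opt in to the hardened compiler and linker flags:
./configure PG_CONFIG=/usr/lib/postgresql/12/bin/pg_config --with-security-flags

# If linking or bitcode compilation then fails (see the AC_MSG_WARN above),
# reconfigure without them:
./configure PG_CONFIG=/usr/lib/postgresql/12/bin/pg_config --without-security-flags
```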
@@ -11,7 +11,9 @@ MODULE_big = citus
EXTENSION = citus

template_sql_files = $(patsubst $(citus_abs_srcdir)/%,%,$(wildcard $(citus_abs_srcdir)/sql/*.sql))
template_downgrade_sql_files = $(patsubst $(citus_abs_srcdir)/sql/downgrades/%,%,$(wildcard $(citus_abs_srcdir)/sql/downgrades/*.sql))
generated_sql_files = $(patsubst %,$(citus_abs_srcdir)/build/%,$(template_sql_files))
generated_downgrade_sql_files += $(patsubst %,$(citus_abs_srcdir)/build/sql/%,$(template_downgrade_sql_files))
# All citus--*.sql files that are used to upgrade between versions
DATA_built = $(generated_sql_files)

@@ -54,6 +56,20 @@ SQL_BUILDDIR=build/sql

$(generated_sql_files): $(citus_abs_srcdir)/build/%: %
	@mkdir -p $(citus_abs_srcdir)/$(SQL_DEPDIR) $(citus_abs_srcdir)/$(SQL_BUILDDIR)
	@# -MF is used to store dependency files(.Po) in another directory for separation
	@# -MT is used to change the target of the rule emitted by dependency generation.
	@# -P is used to inhibit generation of linemarkers in the output from the preprocessor.
	@# -undef is used to not predefine any system-specific or GCC-specific macros.
	@# `man cpp` for further information
	cd $(citus_abs_srcdir) && cpp -undef -w -P -MMD -MP -MF$(SQL_DEPDIR)/$(*F).Po -MT$@ $< > $@

$(generated_downgrade_sql_files): $(citus_abs_srcdir)/build/sql/%: sql/downgrades/%
	@mkdir -p $(citus_abs_srcdir)/$(SQL_DEPDIR) $(citus_abs_srcdir)/$(SQL_BUILDDIR)
	@# -MF is used to store dependency files(.Po) in another directory for separation
	@# -MT is used to change the target of the rule emitted by dependency generation.
	@# -P is used to inhibit generation of linemarkers in the output from the preprocessor.
	@# -undef is used to not predefine any system-specific or GCC-specific macros.
	@# `man cpp` for further information
	cd $(citus_abs_srcdir) && cpp -undef -w -P -MMD -MP -MF$(SQL_DEPDIR)/$(*F).Po -MT$@ $< > $@

SQL_Po_files := $(wildcard $(SQL_DEPDIR)/*.Po)

@@ -61,7 +77,7 @@ ifneq (,$(SQL_Po_files))
include $(SQL_Po_files)
endif

.PHONY: check-sql-snapshots clean-full
.PHONY: check-sql-snapshots clean-full install install-downgrades install-all

check-sql-snapshots:
	bash -c '\

@@ -76,6 +92,13 @@ cleanup-before-install:

install: cleanup-before-install

# install and install-downgrades should be run sequentially
install-all: install
	make install-downgrades

install-downgrades: $(generated_downgrade_sql_files)
	$(INSTALL_DATA) $(generated_downgrade_sql_files) '$(DESTDIR)$(datadir)/$(datamoduledir)/'

clean-full:
	make clean
	rm -rf $(safestringlib_builddir)

@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '9.4-1'
default_version = '9.4-2'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog

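The default_version bump from '9.4-1' to '9.4-2' in the extension control file means a freshly created extension picks up the new script, while existing installations move forward with ALTER EXTENSION. A hedged example; when and where to run it (each node, after installing the new packages) is an assumption, not stated in the diff:

```sql
-- Explicitly target the new script version:
ALTER EXTENSION citus UPDATE TO '9.4-2';
-- or simply move to the new default version:
ALTER EXTENSION citus UPDATE;
```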
@ -278,7 +278,7 @@ PreprocessDropCollationStmt(Node *node, const char *queryString)
|
|||
(void *) dropStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -311,7 +311,7 @@ PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -346,7 +346,7 @@ PreprocessRenameCollationStmt(Node *node, const char *queryString)
|
|||
(void *) renameStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -379,7 +379,7 @@ PreprocessAlterCollationSchemaStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -604,6 +604,6 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
|
|||
|
||||
MarkObjectDistributed(&collationAddress);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, CreateCollationDDLsIdempotent(
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent(
|
||||
collationAddress.objectId));
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
|
|||
* either get it now, or get it in master_add_node after this transaction finishes and
|
||||
* the pg_dist_object record becomes visible.
|
||||
*/
|
||||
List *workerNodeList = ActivePrimaryWorkerNodeList(RowShareLock);
|
||||
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock);
|
||||
|
||||
/*
|
||||
* right after we acquired the lock we mark our objects as distributed, these changes
|
||||
|
|
|
@ -190,7 +190,7 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
|
|||
|
||||
MarkObjectDistributed(&extensionAddress);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -306,7 +306,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString)
|
|||
(void *) deparsedStmt,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -421,7 +421,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString)
|
|||
(void *) alterExtensionStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -489,7 +489,7 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString)
|
|||
(void *) alterExtensionStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -169,7 +169,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
|
|||
const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
|
||||
initStringInfo(&ddlCommand);
|
||||
appendStringInfo(&ddlCommand, "%s;%s", createFunctionSQL, alterFunctionOwnerSQL);
|
||||
SendCommandToWorkersAsUser(ALL_WORKERS, CurrentUserName(), ddlCommand.data);
|
||||
SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data);
|
||||
|
||||
MarkObjectDistributed(&functionAddress);
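The hunk above sits inside the C implementation backing the create_distributed_function() UDF; the change itself only switches the target worker set constant from ALL_WORKERS to NON_COORDINATOR_NODES. For orientation, a hedged example of the SQL-level call; the function name and its distribution argument are made up:

```sql
-- Distribute an existing function by its first argument; '$1' names the
-- distribution argument.
SELECT create_distributed_function('increment_counter(int)', '$1');
```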
|
||||
|
||||
|
@ -1022,7 +1022,7 @@ EnsureSequentialModeForFunctionDDL(void)
|
|||
static void
|
||||
TriggerSyncMetadataToPrimaryNodes(void)
|
||||
{
|
||||
List *workerList = ActivePrimaryWorkerNodeList(ShareLock);
|
||||
List *workerList = ActivePrimaryNonCoordinatorNodeList(ShareLock);
|
||||
bool triggerMetadataSync = false;
|
||||
|
||||
WorkerNode *workerNode = NULL;
|
||||
|
@ -1192,7 +1192,7 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString)
|
|||
GetFunctionAlterOwnerCommand(address.objectId),
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1279,7 +1279,7 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1312,7 +1312,7 @@ PreprocessRenameFunctionStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1343,7 +1343,7 @@ PreprocessAlterFunctionSchemaStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1375,7 +1375,7 @@ PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1477,7 +1477,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString)
|
|||
(void *) dropStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -411,6 +411,16 @@ PostprocessIndexStmt(Node *node, const char *queryString)
|
|||
return NIL;
|
||||
}
|
||||
|
||||
/*
|
||||
* We make sure schema name is not null in the PreprocessIndexStmt
|
||||
*/
|
||||
Oid schemaId = get_namespace_oid(indexStmt->relation->schemaname, true);
|
||||
Oid relationId = get_relname_relid(indexStmt->relation->relname, schemaId);
|
||||
if (!IsCitusTable(relationId))
|
||||
{
|
||||
return NIL;
|
||||
}
|
||||
|
||||
/* commit the current transaction and start anew */
|
||||
CommitTransactionCommand();
|
||||
StartTransactionCommand();
|
||||
|
@ -418,7 +428,7 @@ PostprocessIndexStmt(Node *node, const char *queryString)
|
|||
/* get the affected relation and index */
|
||||
Relation relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
|
||||
Oid indexRelationId = get_relname_relid(indexStmt->idxname,
|
||||
RelationGetNamespace(relation));
|
||||
schemaId);
|
||||
Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);
|
||||
|
||||
/* close relations but retain locks */
|
||||
|
|
|
@ -253,7 +253,8 @@ static CopyShardState * GetShardState(uint64 shardId, HTAB *shardStateHash,
|
|||
copyOutState, bool isCopyToIntermediateFile);
|
||||
static MultiConnection * CopyGetPlacementConnection(HTAB *connectionStateHash,
|
||||
ShardPlacement *placement,
|
||||
bool stopOnFailure);
|
||||
bool stopOnFailure,
|
||||
bool colocatedIntermediateResult);
|
||||
static bool HasReachedAdaptiveExecutorPoolSize(List *connectionStateHash);
|
||||
static MultiConnection * GetLeastUtilisedCopyConnection(List *connectionStateList,
|
||||
char *nodeName, int nodePort);
|
||||
|
@ -2230,7 +2231,10 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
|
|||
/* define the template for the COPY statement that is sent to workers */
|
||||
CopyStmt *copyStatement = makeNode(CopyStmt);
|
||||
|
||||
if (copyDest->intermediateResultIdPrefix != NULL)
|
||||
|
||||
bool colocatedIntermediateResults =
|
||||
copyDest->intermediateResultIdPrefix != NULL;
|
||||
if (colocatedIntermediateResults)
|
||||
{
|
||||
copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix,
|
||||
-1);
|
||||
|
@ -3448,7 +3452,8 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
}
|
||||
|
||||
MultiConnection *connection =
|
||||
CopyGetPlacementConnection(connectionStateHash, placement, stopOnFailure);
|
||||
CopyGetPlacementConnection(connectionStateHash, placement, stopOnFailure,
|
||||
isCopyToIntermediateFile);
|
||||
if (connection == NULL)
|
||||
{
|
||||
failedPlacementCount++;
|
||||
|
@ -3544,11 +3549,40 @@ LogLocalCopyExecution(uint64 shardId)
|
|||
* then it reuses the connection. Otherwise, it requests a connection for placement.
|
||||
*/
|
||||
static MultiConnection *
|
||||
CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement, bool
|
||||
stopOnFailure)
|
||||
CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
|
||||
bool stopOnFailure, bool colocatedIntermediateResult)
|
||||
{
|
||||
uint32 connectionFlags = FOR_DML;
|
||||
char *nodeUser = CurrentUserName();
|
||||
if (colocatedIntermediateResult)
|
||||
{
|
||||
/*
|
||||
* Colocated intermediate results are just files and not required to use
|
||||
* the same connections with their co-located shards. So, we are free to
|
||||
* use any connection we can get.
|
||||
*
|
||||
* Also, the current connection re-use logic does not know how to handle
|
||||
* intermediate results as the intermediate results always truncates the
|
||||
* existing files. That's why we we use one connection per intermediate
|
||||
* result.
|
||||
*
|
||||
* Also note that we are breaking the guarantees of citus.shared_pool_size
|
||||
* as we cannot rely on optional connections.
|
||||
*/
|
||||
uint32 connectionFlagsForIntermediateResult = 0;
|
||||
MultiConnection *connection =
|
||||
GetNodeConnection(connectionFlagsForIntermediateResult, placement->nodeName,
|
||||
placement->nodePort);
|
||||
|
||||
/*
|
||||
* As noted above, we want each intermediate file to go over
|
||||
* a separate connection.
|
||||
*/
|
||||
ClaimConnectionExclusively(connection);
|
||||
|
||||
/* and, we cannot afford to handle failures when anything goes wrong */
|
||||
MarkRemoteTransactionCritical(connection);
|
||||
|
||||
return connection;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine whether the task has to be assigned to a particular connection
|
||||
|
@ -3556,10 +3590,10 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
|
|||
*/
|
||||
ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement,
|
||||
PLACEMENT_ACCESS_DML);
|
||||
MultiConnection *connection = GetConnectionIfPlacementAccessedInXact(connectionFlags,
|
||||
list_make1(
|
||||
placementAccess),
|
||||
NULL);
|
||||
uint32 connectionFlags = FOR_DML;
|
||||
MultiConnection *connection =
|
||||
GetConnectionIfPlacementAccessedInXact(connectionFlags,
|
||||
list_make1(placementAccess), NULL);
|
||||
if (connection != NULL)
|
||||
{
|
||||
return connection;
|
||||
|
@ -3585,6 +3619,12 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
|
|||
*/
|
||||
Assert(connection != NULL);
|
||||
|
||||
/*
|
||||
* Make sure that the connection management remembers that Citus
|
||||
* accesses this placement over the connection.
|
||||
*/
|
||||
AssignPlacementListToConnection(list_make1(placementAccess), connection);
|
||||
|
||||
return connection;
|
||||
}
|
||||
|
||||
|
@ -3599,6 +3639,7 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
|
|||
connectionFlags |= CONNECTION_PER_PLACEMENT;
|
||||
}
|
||||
|
||||
char *nodeUser = CurrentUserName();
|
||||
connection = GetPlacementConnection(connectionFlags, placement, nodeUser);
|
||||
|
||||
if (PQstatus(connection->pgConn) != CONNECTION_OK)
|
||||
|
@ -3663,7 +3704,7 @@ GetLeastUtilisedCopyConnection(List *connectionStateList, char *nodeName,
|
|||
int nodePort)
|
||||
{
|
||||
MultiConnection *connection = NULL;
|
||||
int minPlacementCount = INT32_MAX;
|
||||
int minPlacementCount = PG_INT32_MAX;
|
||||
ListCell *connectionStateCell = NULL;
|
||||
|
||||
foreach(connectionStateCell, connectionStateList)
|
||||
|
|
|
@ -38,11 +38,13 @@
|
|||
#include "nodes/makefuncs.h"
|
||||
#include "nodes/parsenodes.h"
|
||||
#include "nodes/pg_list.h"
|
||||
#include "parser/scansup.h"
|
||||
#include "utils/acl.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/guc_tables.h"
|
||||
#include "utils/guc.h"
|
||||
#include "utils/rel.h"
|
||||
#include "utils/varlena.h"
|
||||
#include "utils/syscache.h"
|
||||
|
||||
static const char * ExtractEncryptedPassword(Oid roleOid);
|
||||
|
@ -169,7 +171,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
|
|||
}
|
||||
List *commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt));
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -210,7 +212,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commandList);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
|
||||
}
|
||||
|
||||
|
||||
|
@ -410,7 +412,7 @@ MakeVariableSetStmt(const char *config)
|
|||
VariableSetStmt *variableSetStmt = makeNode(VariableSetStmt);
|
||||
variableSetStmt->kind = VAR_SET_VALUE;
|
||||
variableSetStmt->name = name;
|
||||
variableSetStmt->args = list_make1(MakeSetStatementArgument(name, value));
|
||||
variableSetStmt->args = MakeSetStatementArguments(name, value);
|
||||
|
||||
return variableSetStmt;
|
||||
}
|
||||
|
@ -624,15 +626,15 @@ GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription
|
|||
|
||||
|
||||
/*
|
||||
* MakeSetStatementArgs parses a configuraton value and creates an A_Const
|
||||
* with an appropriate type.
|
||||
* MakeSetStatementArgs parses a configuraton value and creates an List of A_Const
|
||||
* Nodes with appropriate types.
|
||||
*
|
||||
* The allowed A_Const types are Integer, Float, and String.
|
||||
*/
|
||||
Node *
|
||||
MakeSetStatementArgument(char *configurationName, char *configurationValue)
|
||||
List *
|
||||
MakeSetStatementArguments(char *configurationName, char *configurationValue)
|
||||
{
|
||||
Node *arg = NULL;
|
||||
List *args = NIL;
|
||||
char **key = &configurationName;
|
||||
|
||||
/* Perform a lookup on GUC variables to find the config type and units.
|
||||
|
@ -668,13 +670,15 @@ MakeSetStatementArgument(char *configurationName, char *configurationValue)
|
|||
int intValue;
|
||||
parse_int(configurationValue, &intValue,
|
||||
(*matchingConfig)->flags, NULL);
|
||||
arg = makeIntConst(intValue, -1);
|
||||
Node *arg = makeIntConst(intValue, -1);
|
||||
args = lappend(args, arg);
|
||||
break;
|
||||
}
|
||||
|
||||
case PGC_REAL:
|
||||
{
|
||||
arg = makeFloatConst(configurationValue, -1);
|
||||
Node *arg = makeFloatConst(configurationValue, -1);
|
||||
args = lappend(args, arg);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -682,7 +686,25 @@ MakeSetStatementArgument(char *configurationName, char *configurationValue)
|
|||
case PGC_STRING:
|
||||
case PGC_ENUM:
|
||||
{
|
||||
arg = makeStringConst(configurationValue, -1);
|
||||
List *configurationList = NIL;
|
||||
|
||||
if ((*matchingConfig)->flags & GUC_LIST_INPUT)
|
||||
{
|
||||
char *configurationValueCopy = pstrdup(configurationValue);
|
||||
SplitIdentifierString(configurationValueCopy, ',',
|
||||
&configurationList);
|
||||
}
|
||||
else
|
||||
{
|
||||
configurationList = list_make1(configurationValue);
|
||||
}
|
||||
|
||||
char *configuration = NULL;
|
||||
foreach_ptr(configuration, configurationList)
|
||||
{
|
||||
Node *arg = makeStringConst(configuration, -1);
|
||||
args = lappend(args, arg);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -696,9 +718,10 @@ MakeSetStatementArgument(char *configurationName, char *configurationValue)
|
|||
}
|
||||
else
|
||||
{
|
||||
arg = makeStringConst(configurationValue, -1);
|
||||
Node *arg = makeStringConst(configurationValue, -1);
|
||||
args = lappend(args, arg);
|
||||
}
|
||||
return (Node *) arg;
|
||||
return args;
|
||||
}
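MakeSetStatementArguments() above splits GUC_LIST_INPUT values with SplitIdentifierString() so that each list element becomes its own A_Const argument, which appears to be the fix behind the changelog's "lists of configuration values in ALTER ROLE SET statements" entry. A hedged SQL example; the role and schema names are invented, and search_path is a list-typed setting:

```sql
-- Propagated to the workers as an ALTER ROLE ... SET whose list elements are
-- kept as separate arguments rather than one quoted string.
ALTER ROLE app_user SET search_path = app_schema, public;
```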
|
||||
|
||||
|
||||
|
|
|
@ -148,7 +148,7 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString)
|
|||
|
||||
stmt->objects = originalObjects;
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, list_make1(sql));
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, list_make1(sql));
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -269,7 +269,10 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString)
|
|||
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
|
||||
Assert(stmt->objectType == OBJECT_TABLE);
|
||||
|
||||
ObjectAddress tableAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
|
||||
/*
|
||||
* We will let Postgres deal with missing_ok
|
||||
*/
|
||||
ObjectAddress tableAddress = GetObjectAddressFromParseTree((Node *) stmt, true);
|
||||
|
||||
if (!ShouldPropagate() || !IsCitusTable(tableAddress.objectId))
|
||||
{
|
||||
|
@ -1481,7 +1484,7 @@ AlterTableSchemaStmtObjectAddress(Node *node, bool missing_ok)
|
|||
if (stmt->relation->schemaname)
|
||||
{
|
||||
const char *schemaName = stmt->relation->schemaname;
|
||||
Oid schemaOid = get_namespace_oid(schemaName, false);
|
||||
Oid schemaOid = get_namespace_oid(schemaName, missing_ok);
|
||||
tableOid = get_relname_relid(tableName, schemaOid);
|
||||
}
|
||||
else
|
||||
|
|
|
@ -162,7 +162,7 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString)
|
|||
(void *) compositeTypeStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -228,7 +228,7 @@ PreprocessAlterTypeStmt(Node *node, const char *queryString)
|
|||
(void *) alterTypeStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -271,7 +271,7 @@ PreprocessCreateEnumStmt(Node *node, const char *queryString)
|
|||
(void *) createEnumStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -362,7 +362,7 @@ PreprocessAlterEnumStmt(Node *node, const char *queryString)
|
|||
(void *) alterEnumStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -500,7 +500,7 @@ PreprocessDropTypeStmt(Node *node, const char *queryString)
|
|||
dropStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -534,7 +534,7 @@ PreprocessRenameTypeStmt(Node *node, const char *queryString)
|
|||
(void *) renameStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -567,7 +567,7 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ PreprocessAlterTypeSchemaStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
@ -657,7 +657,7 @@ PreprocessAlterTypeOwnerStmt(Node *node, const char *queryString)
|
|||
(void *) sql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(ALL_WORKERS, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -915,7 +915,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
|
|||
{
|
||||
/*
|
||||
* if there are no nodes we don't have to plan any ddl tasks. Planning them would
|
||||
* cause a hang in the executor.
|
||||
* cause the executor to stop responding.
|
||||
*/
|
||||
return NIL;
|
||||
}
|
||||
|
|
|
@ -127,7 +127,8 @@ PostprocessVariableSetStmt(VariableSetStmt *setStmt, const char *setStmtString)
|
|||
/* haven't seen any SET stmts so far in this (sub-)xact: initialize StringInfo */
|
||||
if (activeSetStmts == NULL)
|
||||
{
|
||||
MemoryContext old_context = MemoryContextSwitchTo(CurTransactionContext);
|
||||
/* see comments in PushSubXact on why we allocate this in TopTransactionContext */
|
||||
MemoryContext old_context = MemoryContextSwitchTo(TopTransactionContext);
|
||||
activeSetStmts = makeStringInfo();
|
||||
MemoryContextSwitchTo(old_context);
|
||||
}
|
||||
|
|
|
@ -160,6 +160,12 @@ AfterXactConnectionHandling(bool isCommit)
|
|||
hash_seq_init(&status, ConnectionHash);
|
||||
while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
|
||||
{
|
||||
if (!entry->isValid)
|
||||
{
|
||||
/* skip invalid connection hash entries */
|
||||
continue;
|
||||
}
|
||||
|
||||
AfterXactHostConnectionHandling(entry, isCommit);
|
||||
|
||||
/*
|
||||
|
@ -323,11 +329,24 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
|
|||
*/
|
||||
|
||||
ConnectionHashEntry *entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
|
||||
if (!found)
|
||||
if (!found || !entry->isValid)
|
||||
{
|
||||
/*
|
||||
* We are just building hash entry or previously it was left in an
|
||||
* invalid state as we couldn't allocate memory for it.
|
||||
* So initialize entry->connections list here.
|
||||
*/
|
||||
entry->isValid = false;
|
||||
entry->connections = MemoryContextAlloc(ConnectionContext,
|
||||
sizeof(dlist_head));
|
||||
dlist_init(entry->connections);
|
||||
|
||||
/*
|
||||
* If MemoryContextAlloc errors out -e.g. during an OOM-, entry->connections
|
||||
* stays as NULL. So entry->isValid should be set to true right after we
|
||||
* initialize entry->connections properly.
|
||||
*/
|
||||
entry->isValid = true;
|
||||
}
|
||||
|
||||
/* if desired, check whether there's a usable connection */
|
||||
|
@ -474,6 +493,12 @@ CloseAllConnectionsAfterTransaction(void)
|
|||
hash_seq_init(&status, ConnectionHash);
|
||||
while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
|
||||
{
|
||||
if (!entry->isValid)
|
||||
{
|
||||
/* skip invalid connection hash entries */
|
||||
continue;
|
||||
}
|
||||
|
||||
dlist_iter iter;
|
||||
|
||||
dlist_head *connections = entry->connections;
|
||||
|
@ -502,6 +527,12 @@ CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort)
|
|||
hash_seq_init(&status, ConnectionHash);
|
||||
while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
|
||||
{
|
||||
if (!entry->isValid)
|
||||
{
|
||||
/* skip invalid connection hash entries */
|
||||
continue;
|
||||
}
|
||||
|
||||
dlist_iter iter;
|
||||
|
||||
if (strcmp(entry->key.hostname, nodeName) != 0 || entry->key.port != nodePort)
|
||||
|
@ -577,6 +608,12 @@ ShutdownAllConnections(void)
|
|||
hash_seq_init(&status, ConnectionHash);
|
||||
while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != NULL)
|
||||
{
|
||||
if (!entry->isValid)
|
||||
{
|
||||
/* skip invalid connection hash entries */
|
||||
continue;
|
||||
}
|
||||
|
||||
dlist_iter iter;
|
||||
|
||||
dlist_foreach(iter, entry->connections)
|
||||
|
@ -1187,6 +1224,12 @@ FreeConnParamsHashEntryFields(ConnParamsHashEntry *entry)
|
|||
static void
|
||||
AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit)
|
||||
{
|
||||
if (!entry || !entry->isValid)
|
||||
{
|
||||
/* callers only pass valid hash entries but let's be on the safe side */
|
||||
ereport(ERROR, (errmsg("connection hash entry is NULL or invalid")));
|
||||
}
|
||||
|
||||
dlist_mutable_iter iter;
|
||||
int cachedConnectionCount = 0;
|
||||
|
||||
|
|
|
@ -532,8 +532,9 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
|
|||
ereport(ERROR, (errmsg("bad number of tuple descriptor attributes")));
|
||||
}
|
||||
|
||||
AttrNumber natts = tupleDescriptor->natts;
|
||||
for (AttrNumber attributeIndex = 0;
|
||||
attributeIndex < (AttrNumber) tupleDescriptor->natts;
|
||||
attributeIndex < natts;
|
||||
attributeIndex++)
|
||||
{
|
||||
Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
|
||||
|
|
|
@ -56,5 +56,5 @@ QualifyVarSetCurrent(VariableSetStmt *setStmt)
|
|||
char *configValue = GetConfigOptionByName(configurationName, NULL, false);
|
||||
|
||||
setStmt->kind = VAR_SET_VALUE;
|
||||
setStmt->args = list_make1(MakeSetStatementArgument(configurationName, configValue));
|
||||
setStmt->args = list_make1(MakeSetStatementArguments(configurationName, configValue));
|
||||
}
|
||||
|
|
|
@ -966,6 +966,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
int ncolumns;
|
||||
char **real_colnames;
|
||||
bool changed_any;
|
||||
bool has_anonymous;
|
||||
int noldcolumns;
|
||||
int i;
|
||||
int j;
|
||||
|
@ -1053,6 +1054,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
*/
|
||||
noldcolumns = list_length(rte->eref->colnames);
|
||||
changed_any = false;
|
||||
has_anonymous = false;
|
||||
j = 0;
|
||||
for (i = 0; i < ncolumns; i++)
|
||||
{
|
||||
|
@ -1090,6 +1092,13 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
/* Remember if any assigned aliases differ from "real" name */
|
||||
if (!changed_any && strcmp(colname, real_colname) != 0)
|
||||
changed_any = true;
|
||||
|
||||
/*
|
||||
* Remember if there is a reference to an anonymous column as named by
|
||||
* char * FigureColname(Node *node)
|
||||
*/
|
||||
if (!has_anonymous && strcmp(real_colname, "?column?") == 0)
|
||||
has_anonymous = true;
|
||||
}
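The has_anonymous flag added above makes the deparser print column aliases whenever a target-list entry fell back to the placeholder name assigned by FigureColname(), matching the changelog's "Fixes deparsing for queries with anonymous column references" entry. A small illustration, with the queries invented for the example:

```sql
-- An expression with no alias is reported under the generated name "?column?" ...
SELECT (1 + 1);

-- ... and an outer query can refer to it by that name; printing explicit
-- aliases keeps such references valid when the query text is regenerated.
SELECT sub."?column?" FROM (SELECT (1 + 1)) AS sub;
```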
|
||||
|
||||
/*
|
||||
|
@ -1119,7 +1128,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
else if (rte->alias && rte->alias->colnames != NIL)
|
||||
colinfo->printaliases = true;
|
||||
else
|
||||
colinfo->printaliases = changed_any;
|
||||
colinfo->printaliases = changed_any || has_anonymous;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3036,7 +3045,7 @@ get_insert_query_def(Query *query, deparse_context *context)
|
|||
/* INSERT requires AS keyword for target alias */
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, "AS %s ",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
|
||||
/*
|
||||
* Add the insert-column-names list. Any indirection decoration needed on
|
||||
|
@ -3235,7 +3244,7 @@ get_update_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if(rte->eref != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->eref->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -3247,7 +3256,7 @@ get_update_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
|
||||
appendStringInfoString(buf, " SET ");
|
||||
|
@ -3467,7 +3476,7 @@ get_delete_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if(rte->eref != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->eref->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -3479,7 +3488,7 @@ get_delete_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
|
||||
/* Add the USING clause if given */
|
||||
|
|
|
@ -966,6 +966,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
int ncolumns;
|
||||
char **real_colnames;
|
||||
bool changed_any;
|
||||
bool has_anonymous;
|
||||
int noldcolumns;
|
||||
int i;
|
||||
int j;
|
||||
|
@ -1053,6 +1054,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
*/
|
||||
noldcolumns = list_length(rte->eref->colnames);
|
||||
changed_any = false;
|
||||
has_anonymous = false;
|
||||
j = 0;
|
||||
for (i = 0; i < ncolumns; i++)
|
||||
{
|
||||
|
@ -1090,6 +1092,13 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
/* Remember if any assigned aliases differ from "real" name */
|
||||
if (!changed_any && strcmp(colname, real_colname) != 0)
|
||||
changed_any = true;
|
||||
|
||||
/*
|
||||
* Remember if there is a reference to an anonymous column as named by
|
||||
* char * FigureColname(Node *node)
|
||||
*/
|
||||
if (!has_anonymous && strcmp(real_colname, "?column?") == 0)
|
||||
has_anonymous = true;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1119,7 +1128,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
|
|||
else if (rte->alias && rte->alias->colnames != NIL)
|
||||
colinfo->printaliases = true;
|
||||
else
|
||||
colinfo->printaliases = changed_any;
|
||||
colinfo->printaliases = changed_any || has_anonymous;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3048,7 +3057,7 @@ get_insert_query_def(Query *query, deparse_context *context)
|
|||
/* INSERT requires AS keyword for target alias */
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, "AS %s ",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
|
||||
/*
|
||||
* Add the insert-column-names list. Any indirection decoration needed on
|
||||
|
@ -3247,7 +3256,7 @@ get_update_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if(rte->eref != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->eref->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -3259,7 +3268,7 @@ get_update_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
|
||||
appendStringInfoString(buf, " SET ");
|
||||
|
@ -3479,7 +3488,7 @@ get_delete_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if(rte->eref != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->eref->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -3491,7 +3500,7 @@ get_delete_query_def(Query *query, deparse_context *context)
|
|||
|
||||
if (rte->alias != NULL)
|
||||
appendStringInfo(buf, " %s",
|
||||
quote_identifier(rte->alias->aliasname));
|
||||
quote_identifier(get_rtable_name(query->resultRelation, context)));
|
||||
}
|
||||
|
||||
/* Add the USING clause if given */
|
||||
|
|
|
@ -442,7 +442,7 @@ struct TaskPlacementExecution;
|
|||
/* GUC, determining whether Citus opens 1 connection per task */
|
||||
bool ForceMaxQueryParallelization = false;
|
||||
int MaxAdaptiveExecutorPoolSize = 16;
|
||||
bool EnableBinaryProtocol = true;
|
||||
bool EnableBinaryProtocol = false;
|
||||
|
||||
/* GUC, number of ms to wait between opening connections to the same worker */
|
||||
int ExecutorSlowStartInterval = 10;
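This hunk changes the default of the binary-results GUC added in 9.4 (EnableBinaryProtocol) from true to false, so binary transfer becomes opt-in. The v9.4.0 changelog names the user-facing setting citus.enable_binary_protocol; a hedged way to turn it on for a session:

```sql
SET citus.enable_binary_protocol TO on;
-- Result sets fetched from the workers for this session are now requested in
-- binary rather than text format.
```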
|
||||
|
@ -656,6 +656,16 @@ static void SetAttributeInputMetadata(DistributedExecution *execution,
|
|||
void
|
||||
AdaptiveExecutorPreExecutorRun(CitusScanState *scanState)
|
||||
{
|
||||
if (scanState->finishedPreScan)
|
||||
{
|
||||
/*
|
||||
* Cursors (and hence RETURN QUERY syntax in pl/pgsql functions)
|
||||
* may trigger AdaptiveExecutorPreExecutorRun() on every fetch
|
||||
* operation. Though, we should only execute PreScan once.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
DistributedPlan *distributedPlan = scanState->distributedPlan;
|
||||
|
||||
/*
|
||||
|
@ -666,6 +676,8 @@ AdaptiveExecutorPreExecutorRun(CitusScanState *scanState)
|
|||
LockPartitionsForDistributedPlan(distributedPlan);
|
||||
|
||||
ExecuteSubPlans(distributedPlan);
|
||||
|
||||
scanState->finishedPreScan = true;
|
||||
}
|
||||
|
||||
|
||||
|
@ -694,13 +706,7 @@ AdaptiveExecutor(CitusScanState *scanState)
|
|||
Assert(!scanState->finishedRemoteScan);
|
||||
|
||||
/* Reset Task fields that are only valid for a single execution */
|
||||
Task *task = NULL;
|
||||
foreach_ptr(task, taskList)
|
||||
{
|
||||
task->totalReceivedTupleData = 0;
|
||||
task->fetchedExplainAnalyzePlacementIndex = 0;
|
||||
task->fetchedExplainAnalyzePlan = NULL;
|
||||
}
|
||||
ResetExplainAnalyzeData(taskList);
|
||||
|
||||
scanState->tuplestorestate =
|
||||
tuplestore_begin_heap(randomAccess, interTransactions, work_mem);
|
||||
|
|
|
@@ -297,7 +297,7 @@ CitusBeginReadOnlyScan(CustomScanState *node, EState *estate, int eflags)
*
* TODO: evaluate stable functions
*/
ExecuteMasterEvaluableExpressions(jobQuery, planState);
ExecuteCoordinatorEvaluableExpressions(jobQuery, planState);

/* job query no longer has parameters, so we should not send any */
workerJob->parametersInJobQueryResolved = true;

@@ -347,7 +347,7 @@ CitusBeginModifyScan(CustomScanState *node, EState *estate, int eflags)

if (ModifyJobNeedsEvaluation(workerJob))
{
ExecuteMasterEvaluableExpressions(jobQuery, planState);
ExecuteCoordinatorEvaluableExpressions(jobQuery, planState);

/* job query no longer has parameters, so we should not send any */
workerJob->parametersInJobQueryResolved = true;

@@ -375,7 +375,7 @@ CitusBeginModifyScan(CustomScanState *node, EState *estate, int eflags)
RegenerateTaskForFasthPathQuery(workerJob);
}
}
else if (workerJob->requiresMasterEvaluation)
else if (workerJob->requiresCoordinatorEvaluation)
{
/*
* When there is no deferred pruning, but we did evaluate functions, then

@@ -428,7 +428,7 @@ CitusBeginModifyScan(CustomScanState *node, EState *estate, int eflags)
static bool
ModifyJobNeedsEvaluation(Job *workerJob)
{
if (workerJob->requiresMasterEvaluation)
if (workerJob->requiresCoordinatorEvaluation)
{
/* query contains functions that need to be evaluated on the coordinator */
return true;

@@ -575,6 +575,9 @@ AdaptiveExecutorCreateScan(CustomScan *scan)
scanState->customScanState.methods = &AdaptiveExecutorCustomExecMethods;
scanState->PreExecScan = &CitusPreExecScan;

scanState->finishedPreScan = false;
scanState->finishedRemoteScan = false;

return (Node *) scanState;
}

@@ -613,6 +616,9 @@ NonPushableInsertSelectCreateScan(CustomScan *scan)
scanState->customScanState.methods =
&NonPushableInsertSelectCustomExecMethods;

scanState->finishedPreScan = false;
scanState->finishedRemoteScan = false;

return (Node *) scanState;
}

@@ -136,7 +136,7 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS)
*/
UseCoordinatedTransaction();

List *nodeList = ActivePrimaryWorkerNodeList(NoLock);
List *nodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
EState *estate = CreateExecutorState();
RemoteFileDestReceiver *resultDest =
(RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,

@@ -118,8 +118,7 @@ JobExecutorType(DistributedPlan *distributedPlan)
}
else
{
List *workerNodeList = ActiveReadableWorkerNodeList();
int workerNodeCount = list_length(workerNodeList);
int workerNodeCount = list_length(ActiveReadableNodeList());
int taskCount = list_length(job->taskList);
double tasksPerNode = taskCount / ((double) workerNodeCount);

@@ -209,7 +209,7 @@ MultiTaskTrackerExecute(Job *job)
* assigning and checking the status of tasks. The second (temporary) hash
* helps us in fetching results data from worker nodes to the master node.
*/
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
List *workerNodeList = ActivePrimaryNodeList(ShareLock);
uint32 taskTrackerCount = (uint32) list_length(workerNodeList);

/* connect as the current user for running queries */

@@ -104,7 +104,7 @@ CreateTemporarySchemasForMergeTasks(Job *topLeveLJob)
{
List *jobIds = ExtractJobsInJobTree(topLeveLJob);
char *createSchemasCommand = GenerateCreateSchemasCommand(jobIds, CurrentUserName());
SendCommandToWorkersInParallel(ALL_WORKERS, createSchemasCommand,
SendCommandToWorkersInParallel(ALL_SHARD_NODES, createSchemasCommand,
CitusExtensionOwnerName());
return jobIds;
}

@@ -191,7 +191,8 @@ GenerateJobCommands(List *jobIds, char *templateCommand)
void
DoRepartitionCleanup(List *jobIds)
{
SendCommandToWorkersOptionalInParallel(ALL_WORKERS, GenerateDeleteJobsCommand(jobIds),
SendCommandToWorkersOptionalInParallel(ALL_SHARD_NODES, GenerateDeleteJobsCommand(
jobIds),
CitusExtensionOwnerName());
}

@@ -22,6 +22,8 @@
#include "executor/executor.h"
#include "utils/datetime.h"

#define SECOND_TO_MILLI_SECOND 1000
#define MICRO_TO_MILLI_SECOND 0.001

int MaxIntermediateResult = 1048576; /* maximum size in KB the intermediate result can grow to */
/* when this is true, we enforce intermediate result size limit in all executors */

@@ -86,7 +88,9 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
int durationMicrosecs = 0;
TimestampDifference(startTimestamp, GetCurrentTimestamp(), &durationSeconds,
&durationMicrosecs);
subPlan->durationMillisecs = durationSeconds * 1000 * +durationMicrosecs * 10e-3;

subPlan->durationMillisecs = durationSeconds * SECOND_TO_MILLI_SECOND;
subPlan->durationMillisecs += durationMicrosecs * MICRO_TO_MILLI_SECOND;

subPlan->bytesSentPerWorker = RemoteFileDestReceiverBytesSent(copyDest);
subPlan->remoteWorkerCount = list_length(remoteWorkerNodeList);

@@ -282,7 +282,7 @@ EnsureModificationsCanRun(void)
if (RecoveryInProgress() && !WritableStandbyCoordinator)
{
ereport(ERROR, (errmsg("writing to worker nodes is not currently allowed"),
errdetail("the database is in recovery mode")));
errdetail("the database is read-only")));
}

if (ReadFromSecondaries == USE_SECONDARY_NODES_ALWAYS)

@@ -1422,12 +1422,12 @@ HasUniformHashDistribution(ShardInterval **shardIntervalArray,
for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++)
{
ShardInterval *shardInterval = shardIntervalArray[shardIndex];
int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMinHashToken = PG_INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);

if (shardIndex == (shardIntervalArrayLength - 1))
{
shardMaxHashToken = INT32_MAX;
shardMaxHashToken = PG_INT32_MAX;
}

if (DatumGetInt32(shardInterval->minValue) != shardMinHashToken ||

@@ -1254,7 +1254,7 @@ SchemaOwnerName(Oid objectId)
static bool
HasMetadataWorkers(void)
{
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);

WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)

@@ -1373,7 +1373,7 @@ SyncMetadataToNodes(void)
return METADATA_SYNC_FAILED_LOCK;
}

List *workerList = ActivePrimaryWorkerNodeList(NoLock);
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{

@@ -117,7 +117,7 @@ OpenConnectionsToAllWorkerNodes(LOCKMODE lockMode)
List *connectionList = NIL;
int connectionFlags = FORCE_NEW_CONNECTION;

List *workerNodeList = ActivePrimaryWorkerNodeList(lockMode);
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode);

WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)

@@ -200,14 +200,14 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
uint32 roundRobinNodeIndex = shardIndex % workerNodeCount;

/* initialize the hash token space for this shard */
int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMinHashToken = PG_INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);
uint64 shardId = GetNextShardId();

/* if we are at the last shard, make sure the max token value is INT_MAX */
if (shardIndex == (shardCount - 1))
{
shardMaxHashToken = INT32_MAX;
shardMaxHashToken = PG_INT32_MAX;
}

/* insert the shard metadata row along with its min/max values */

@@ -457,7 +457,7 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS)
MemoryContext oldContext = MemoryContextSwitchTo(
functionContext->multi_call_memory_ctx);

List *workerNodeList = ActiveReadableWorkerNodeList();
List *workerNodeList = ActiveReadableNonCoordinatorNodeList();
workerNodeCount = (uint32) list_length(workerNodeList);

functionContext->user_fctx = workerNodeList;

@@ -293,12 +293,13 @@ WorkerGetNodeWithName(const char *hostname)


/*
* ActivePrimaryWorkerNodeCount returns the number of groups with a primary in the cluster.
* ActivePrimaryNonCoordinatorNodeCount returns the number of groups with a primary in the cluster.
* This method excludes coordinator even if it is added as a worker to cluster.
*/
uint32
ActivePrimaryWorkerNodeCount(void)
ActivePrimaryNonCoordinatorNodeCount(void)
{
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
uint32 liveWorkerCount = list_length(workerNodeList);

return liveWorkerCount;

@@ -306,12 +307,13 @@ ActivePrimaryWorkerNodeCount(void)


/*
* ActiveReadableWorkerNodeCount returns the number of groups with a node we can read from.
* ActiveReadableNonCoordinatorNodeCount returns the number of groups with a node we can read from.
* This method excludes coordinator even if it is added as a worker.
*/
uint32
ActiveReadableWorkerNodeCount(void)
ActiveReadableNonCoordinatorNodeCount(void)
{
List *workerNodeList = ActiveReadableWorkerNodeList();
List *workerNodeList = ActiveReadableNonCoordinatorNodeList();
uint32 liveWorkerCount = list_length(workerNodeList);

return liveWorkerCount;

@@ -366,13 +368,14 @@ FilterActiveNodeListFunc(LOCKMODE lockMode, bool (*checkFunction)(WorkerNode *))


/*
* ActivePrimaryWorkerNodeList returns a list of all active primary worker nodes
* ActivePrimaryNonCoordinatorNodeList returns a list of all active primary worker nodes
* in workerNodeHash. lockMode specifies which lock to use on pg_dist_node,
* this is necessary when the caller wouldn't want nodes to be added concurrent
* to their use of this list.
* This method excludes coordinator even if it is added as a worker to cluster.
*/
List *
ActivePrimaryWorkerNodeList(LOCKMODE lockMode)
ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode)
{
EnsureModificationsCanRun();
return FilterActiveNodeListFunc(lockMode, NodeIsPrimaryWorker);

@@ -443,11 +446,11 @@ NodeCanHaveDistTablePlacements(WorkerNode *node)


/*
* ActiveReadableWorkerNodeList returns a list of all nodes in workerNodeHash
* that are readable workers.
* ActiveReadableNonCoordinatorNodeList returns a list of all nodes in workerNodeHash
* that are readable nodes This method excludes coordinator.
*/
List *
ActiveReadableWorkerNodeList(void)
ActiveReadableNonCoordinatorNodeList(void)
{
return FilterActiveNodeListFunc(NoLock, NodeIsReadableWorker);
}

@@ -456,6 +459,7 @@ ActiveReadableWorkerNodeList(void)
/*
* ActiveReadableNodeList returns a list of all nodes in workerNodeHash
* that are readable workers.
* This method includes coordinator if it is added as a worker to the cluster.
*/
List *
ActiveReadableNodeList(void)

@@ -602,7 +606,7 @@ WorkerNodeCompare(const void *lhsKey, const void *rhsKey, Size keySize)
WorkerNode *
GetFirstPrimaryWorkerNode(void)
{
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *firstWorkerNode = NULL;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)

@@ -50,7 +50,7 @@ static CustomPathMethods CitusCustomScanPathMethods = {
};

/*
* MasterNodeSelectPlan takes in a distributed plan and a custom scan node which
* PlanCombineQuery takes in a distributed plan and a custom scan node which
* wraps remote part of the plan. This function finds the combine query structure
* in the multi plan, and builds the final select plan to execute on the tuples
* returned by remote scan on the coordinator node. Note that this select

@@ -58,7 +58,7 @@ static CustomPathMethods CitusCustomScanPathMethods = {
* filled into the tuple store inside provided custom scan.
*/
PlannedStmt *
MasterNodeSelectPlan(DistributedPlan *distributedPlan, CustomScan *remoteScan)
PlanCombineQuery(DistributedPlan *distributedPlan, CustomScan *remoteScan)
{
Query *combineQuery = distributedPlan->combineQuery;

@@ -74,7 +74,7 @@ RebuildQueryStrings(Job *workerJob)

query = copyObject(originalQuery);

RangeTblEntry *copiedInsertRte = ExtractResultRelationRTE(query);
RangeTblEntry *copiedInsertRte = ExtractResultRelationRTEOrError(query);
RangeTblEntry *copiedSubqueryRte = ExtractSelectRangeTableEntry(query);
Query *copiedSubquery = copiedSubqueryRte->subquery;

@@ -1368,7 +1368,7 @@ static PlannedStmt *
FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
CustomScan *customScan)
{
PlannedStmt *finalPlan = MasterNodeSelectPlan(distributedPlan, customScan);
PlannedStmt *finalPlan = PlanCombineQuery(distributedPlan, customScan);
finalPlan->queryId = localPlan->queryId;
finalPlan->utilityStmt = localPlan->utilityStmt;

@@ -275,7 +275,7 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */
uint64 jobId = INVALID_JOB_ID;
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
RangeTblEntry *insertRte = ExtractResultRelationRTE(originalQuery);
RangeTblEntry *insertRte = ExtractResultRelationRTEOrError(originalQuery);
RangeTblEntry *subqueryRte = ExtractSelectRangeTableEntry(originalQuery);
Oid targetRelationId = insertRte->relid;
CitusTableCacheEntry *targetCacheEntry = GetCitusTableCacheEntry(targetRelationId);

@@ -348,7 +348,8 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
workerJob->dependentJobList = NIL;
workerJob->jobId = jobId;
workerJob->jobQuery = originalQuery;
workerJob->requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery);
workerJob->requiresCoordinatorEvaluation =
RequiresCoordinatorEvaluation(originalQuery);

/* and finally the multi plan */
distributedPlan->workerJob = workerJob;

@@ -648,7 +649,7 @@ RouterModifyTaskForShardInterval(Query *originalQuery,
DeferredErrorMessage **routerPlannerError)
{
Query *copiedQuery = copyObject(originalQuery);
RangeTblEntry *copiedInsertRte = ExtractResultRelationRTE(copiedQuery);
RangeTblEntry *copiedInsertRte = ExtractResultRelationRTEOrError(copiedQuery);
RangeTblEntry *copiedSubqueryRte = ExtractSelectRangeTableEntry(copiedQuery);
Query *copiedSubquery = (Query *) copiedSubqueryRte->subquery;

@@ -1343,7 +1344,7 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou
Query *insertSelectQuery = copyObject(parse);

RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);
RangeTblEntry *insertRte = ExtractResultRelationRTE(insertSelectQuery);
RangeTblEntry *insertRte = ExtractResultRelationRTEOrError(insertSelectQuery);
Oid targetRelationId = insertRte->relid;

DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);

@@ -151,7 +151,7 @@ RecordSubplanExecutionsOnNodes(HTAB *intermediateResultsHash,
List *usedSubPlanNodeList = distributedPlan->usedSubPlanNodeList;
List *subPlanList = distributedPlan->subPlanList;
ListCell *subPlanCell = NULL;
int workerNodeCount = ActiveReadableWorkerNodeCount();
int workerNodeCount = ActiveReadableNonCoordinatorNodeCount();

foreach(subPlanCell, usedSubPlanNodeList)
{

@@ -269,7 +269,7 @@ AppendAllAccessedWorkerNodes(IntermediateResultsHashEntry *entry,
static void
AppendAllWorkerNodes(IntermediateResultsHashEntry *entry)
{
List *workerNodeList = ActiveReadableWorkerNodeList();
List *workerNodeList = ActiveReadableNonCoordinatorNodeList();

WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)

@@ -190,7 +190,7 @@ IsLocalPlanCachingSupported(Job *currentJob, DistributedPlan *originalDistribute
* We do not cache plans with volatile functions in the query.
*
* The reason we care about volatile functions is primarily that we
* already executed them in ExecuteMasterEvaluableExpressions
* already executed them in ExecuteCoordinatorEvaluableExpressions
* and since we're falling back to the original query tree here we would
* execute them again if we execute the plan.
*/

@@ -147,7 +147,7 @@ static void ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,
static TupleDesc ExplainAnalyzeDestTupleDescForQuery(TupleDestination *self, int
queryNumber);
static char * WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc);
static List * SplitString(const char *str, char delimiter);
static List * SplitString(const char *str, char delimiter, int maxLength);

/* Static Explain functions copied from explain.c */
static void ExplainOneQuery(Query *query, int cursorOptions,

@@ -576,8 +576,11 @@ GetSavedRemoteExplain(Task *task, ExplainState *es)
*/
if (es->format == EXPLAIN_FORMAT_TEXT)
{
/*
* We limit the size of EXPLAIN plans to RSIZE_MAX_MEM (256MB).
*/
remotePlan->explainOutputList = SplitString(task->fetchedExplainAnalyzePlan,
'\n');
'\n', RSIZE_MAX_MEM);
}
else
{

@@ -957,7 +960,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)

INSTR_TIME_SET_CURRENT(planStart);

PlannedStmt *plan = pg_plan_query(query, 0, NULL);
PlannedStmt *plan = pg_plan_query(query, CURSOR_OPT_PARALLEL_OK, NULL);

INSTR_TIME_SET_CURRENT(planDuration);
INSTR_TIME_SUBTRACT(planDuration, planStart);

@@ -1194,8 +1197,26 @@ ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,

char *fetchedExplainAnalyzePlan = TextDatumGetCString(explainAnalyze);

/*
* Allocate fetchedExplainAnalyzePlan in the same context as the Task, since we are
* currently in execution context and a Task can span multiple executions.
*
* Although we won't reuse the same value in a future execution, but we have
* calls to CheckNodeCopyAndSerialization() which asserts copy functions of the task
* work as expected, which will try to copy this value in a future execution.
*
* Why we don't we just allocate this field in executor context and reset it before
* the next execution? Because when an error is raised we can skip pretty much most
* of the meaningful places that we can insert the reset.
*
* TODO: Take all EXPLAIN ANALYZE related fields out of Task and store them in a
* Task to ExplainAnalyzePrivate mapping in multi_explain.c, so we don't need to
* do these hacky memory context management tricks.
*/
MemoryContext taskContext = GetMemoryChunkContext(tupleDestination->originalTask);

tupleDestination->originalTask->fetchedExplainAnalyzePlan =
pstrdup(fetchedExplainAnalyzePlan);
MemoryContextStrdup(taskContext, fetchedExplainAnalyzePlan);
tupleDestination->originalTask->fetchedExplainAnalyzePlacementIndex =
placementIndex;
}

@@ -1207,6 +1228,27 @@ ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,
}


/*
* ResetExplainAnalyzeData reset fields in Task that are used by multi_explain.c
*/
void
ResetExplainAnalyzeData(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
{
if (task->fetchedExplainAnalyzePlan != NULL)
{
pfree(task->fetchedExplainAnalyzePlan);
}

task->totalReceivedTupleData = 0;
task->fetchedExplainAnalyzePlacementIndex = 0;
task->fetchedExplainAnalyzePlan = NULL;
}
}


/*
* ExplainAnalyzeDestTupleDescForQuery implements TupleDestination->tupleDescForQuery
* for ExplainAnalyzeDestination.

@@ -1353,9 +1395,9 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc)
* it isn't safe if by any chance str is not null-terminated.
*/
static List *
SplitString(const char *str, char delimiter)
SplitString(const char *str, char delimiter, int maxLength)
{
size_t len = strnlen_s(str, RSIZE_MAX_STR);
size_t len = strnlen(str, maxLength);
if (len == 0)
{
return NIL;

@@ -165,7 +165,7 @@ static Task * QueryPushdownTaskCreate(Query *originalQuery, int shardIndex,
RelationRestrictionContext *restrictionContext,
uint32 taskId,
TaskType taskType,
bool modifyRequiresMasterEvaluation);
bool modifyRequiresCoordinatorEvaluation);
static bool ShardIntervalsEqual(FmgrInfo *comparisonFunction,
Oid collation,
ShardInterval *firstInterval,

@@ -2015,7 +2015,7 @@ BuildJob(Query *jobQuery, List *dependentJobList)
job->jobId = UniqueJobId();
job->jobQuery = jobQuery;
job->dependentJobList = dependentJobList;
job->requiresMasterEvaluation = false;
job->requiresCoordinatorEvaluation = false;

return job;
}

@@ -2107,7 +2107,7 @@ BuildMapMergeJob(Query *jobQuery, List *dependentJobList, Var *partitionKey,
static uint32
HashPartitionCount(void)
{
uint32 groupCount = ActiveReadableWorkerNodeCount();
uint32 groupCount = list_length(ActiveReadableNodeList());
double maxReduceTasksPerNode = MaxRunningTasksPerNode / 2.0;

uint32 partitionCount = (uint32) rint(groupCount * maxReduceTasksPerNode);

@@ -2289,7 +2289,7 @@ List *
QueryPushdownSqlTaskList(Query *query, uint64 jobId,
RelationRestrictionContext *relationRestrictionContext,
List *prunedRelationShardList, TaskType taskType, bool
modifyRequiresMasterEvaluation)
modifyRequiresCoordinatorEvaluation)
{
List *sqlTaskList = NIL;
ListCell *restrictionCell = NULL;

@@ -2393,7 +2393,7 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
relationRestrictionContext,
taskIdIndex,
taskType,
modifyRequiresMasterEvaluation);
modifyRequiresCoordinatorEvaluation);
subqueryTask->jobId = jobId;
sqlTaskList = lappend(sqlTaskList, subqueryTask);

@@ -2570,7 +2570,7 @@ ErrorIfUnsupportedShardDistribution(Query *query)
static Task *
QueryPushdownTaskCreate(Query *originalQuery, int shardIndex,
RelationRestrictionContext *restrictionContext, uint32 taskId,
TaskType taskType, bool modifyRequiresMasterEvaluation)
TaskType taskType, bool modifyRequiresCoordinatorEvaluation)
{
Query *taskQuery = copyObject(originalQuery);

@@ -2672,7 +2672,7 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex,

Task *subqueryTask = CreateBasicTask(jobId, taskId, taskType, NULL);

if ((taskType == MODIFY_TASK && !modifyRequiresMasterEvaluation) ||
if ((taskType == MODIFY_TASK && !modifyRequiresCoordinatorEvaluation) ||
taskType == READ_TASK)
{
pg_get_query_def(taskQuery, queryString);

@@ -4599,7 +4599,7 @@ GenerateSyntheticShardIntervalArray(int partitionCount)
ShardInterval *shardInterval = CitusMakeNode(ShardInterval);

/* calculate the split of the hash space */
int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMinHashToken = PG_INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);

shardInterval->relationId = InvalidOid;

@@ -5717,7 +5717,7 @@ AssignDualHashTaskList(List *taskList)
* if subsequent jobs have a small number of tasks, we won't allocate the
* tasks to the same worker repeatedly.
*/
List *workerNodeList = ActiveReadableWorkerNodeList();
List *workerNodeList = ActiveReadableNodeList();
uint32 workerNodeCount = (uint32) list_length(workerNodeList);
uint32 beginningNodeIndex = jobId % workerNodeCount;

@@ -45,6 +45,7 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/query_pushdown_planning.h"
#include "distributed/query_utils.h"
#include "distributed/recursive_planning.h"
#include "distributed/reference_table_utils.h"
#include "distributed/relation_restriction_equivalence.h"
#include "distributed/relay_utility.h"

@@ -156,6 +157,7 @@ static DeferredErrorMessage * MultiRouterPlannableQuery(Query *query);
static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree);
static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);
static ShardPlacement * CreateDummyPlacement(bool hasLocalRelation);
static ShardPlacement * CreateLocalDummyPlacement();
static List * get_all_actual_clauses(List *restrictinfo_list);
static int CompareInsertValuesByShardId(const void *leftElement,
const void *rightElement);

@@ -507,7 +509,9 @@ ResultRelationOidForQuery(Query *query)


/*
* ExtractResultRelationRTE returns the table's resultRelation range table entry.
* ExtractResultRelationRTE returns the table's resultRelation range table
* entry. This returns NULL when there's no resultRelation, such as in a SELECT
* query.
*/
RangeTblEntry *
ExtractResultRelationRTE(Query *query)

@@ -521,6 +525,28 @@ ExtractResultRelationRTE(Query *query)
}


/*
* ExtractResultRelationRTEOrError returns the table's resultRelation range table
* entry and errors out if there's no result relation at all, e.g. like in a
* SELECT query.
*
* This is a separate function (instead of using missingOk), so static analysis
* reasons about NULL returns correctly.
*/
RangeTblEntry *
ExtractResultRelationRTEOrError(Query *query)
{
RangeTblEntry *relation = ExtractResultRelationRTE(query);
if (relation == NULL)
{
ereport(ERROR, (errmsg("no result relation could be found for the query"),
errhint("is this a SELECT query?")));
}

return relation;
}


/*
* IsTidColumn gets a node and returns true if the node is a Var type of TID.
*/

@@ -1302,9 +1328,9 @@ MasterIrreducibleExpressionWalker(Node *expression, WalkerState *state)

/*
* In order for statement replication to give us consistent results it's important
* that we either disallow or evaluate on the master anything which has a volatility
* category above IMMUTABLE. Newer versions of postgres might add node types which
* should be checked in this function.
* that we either disallow or evaluate on the coordinator anything which has a
* volatility category above IMMUTABLE. Newer versions of postgres might add node
* types which should be checked in this function.
*
* Look through contain_mutable_functions_walker or future PG's equivalent for new
* node types before bumping this version number to fix compilation; e.g. for any

@@ -1451,7 +1477,7 @@ RouterInsertJob(Query *originalQuery)
}

Job *job = CreateJob(originalQuery);
job->requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery);
job->requiresCoordinatorEvaluation = RequiresCoordinatorEvaluation(originalQuery);
job->deferredPruning = true;
job->partitionKeyValue = ExtractInsertPartitionKeyValue(originalQuery);

@@ -1471,7 +1497,7 @@ CreateJob(Query *query)
job->taskList = NIL;
job->dependentJobList = NIL;
job->subqueryPushdown = false;
job->requiresMasterEvaluation = false;
job->requiresCoordinatorEvaluation = false;
job->deferredPruning = false;

return job;

@@ -1625,8 +1651,8 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
/* router planner should create task even if it doesn't hit a shard at all */
bool replacePrunedQueryWithDummy = true;

/* check if this query requires master evaluation */
bool requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery);
/* check if this query requires coordinator evaluation */
bool requiresCoordinatorEvaluation = RequiresCoordinatorEvaluation(originalQuery);
FastPathRestrictionContext *fastPathRestrictionContext =
plannerRestrictionContext->fastPathRestrictionContext;

@@ -1688,7 +1714,7 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
relationRestrictionContext,
prunedShardIntervalListList,
MODIFY_TASK,
requiresMasterEvaluation);
requiresCoordinatorEvaluation);
}
else
{

@@ -1696,7 +1722,7 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
placementList, shardId);
}

job->requiresMasterEvaluation = requiresMasterEvaluation;
job->requiresCoordinatorEvaluation = requiresCoordinatorEvaluation;
return job;
}

@@ -1979,6 +2005,16 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query)
continue;
}

if (rangeTableEntry->relkind == RELKIND_VIEW ||
rangeTableEntry->relkind == RELKIND_MATVIEW)
{
/*
* Skip over views, which would error out in GetCitusTableCacheEntry.
* Distributed tables within (regular) views are already in rangeTableList.
*/
continue;
}

CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(
rangeTableEntry->relid);
if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE &&

@@ -2022,8 +2058,6 @@ PlanRouterQuery(Query *originalQuery,
bool replacePrunedQueryWithDummy, bool *multiShardModifyQuery,
Const **partitionValueConst)
{
RelationRestrictionContext *relationRestrictionContext =
plannerRestrictionContext->relationRestrictionContext;
bool isMultiShardQuery = false;
DeferredErrorMessage *planningError = NULL;
bool shardsPresent = false;

@@ -2136,7 +2170,12 @@ PlanRouterQuery(Query *originalQuery,
/* we need anchor shard id for select queries with router planner */
uint64 shardId = GetAnchorShardId(*prunedShardIntervalListList);

bool hasLocalRelation = relationRestrictionContext->hasLocalRelation;
/*
* We keep track of hasLocalRelation in plannerRestrictionContext->
* relationRestrictionContext, but in rare cases tables are excluded from
* there (e.g. catalog table on inside of an inner join). So we recheck.
*/
bool hasLocalRelation = FindNodeCheck((Node *) originalQuery, IsLocalTableRTE);

List *taskPlacementList =
CreateTaskPlacementListForShardIntervals(*prunedShardIntervalListList,

@@ -2152,10 +2191,11 @@ PlanRouterQuery(Query *originalQuery,
}

/*
* If this is an UPDATE or DELETE query which requires master evaluation,
* If this is an UPDATE or DELETE query which requires coordinator evaluation,
* don't try update shard names, and postpone that to execution phase.
*/
if (!(UpdateOrDeleteQuery(originalQuery) && RequiresMasterEvaluation(originalQuery)))
bool isUpdateOrDelete = UpdateOrDeleteQuery(originalQuery);
if (!(isUpdateOrDelete && RequiresCoordinatorEvaluation(originalQuery)))
{
UpdateRelationToShardNames((Node *) originalQuery, *relationShardList);
}

@@ -2232,6 +2272,25 @@ CreateTaskPlacementListForShardIntervals(List *shardIntervalListList, bool shard
}


/*
* CreateLocalDummyPlacement creates a dummy placement for the local node that
* can be used for queries that don't involve any shards. The typical examples
* are:
* (a) queries that consist of only intermediate results
* (b) queries that hit zero shards (... WHERE false;)
*/
static ShardPlacement *
CreateLocalDummyPlacement()
{
ShardPlacement *dummyPlacement = CitusMakeNode(ShardPlacement);
dummyPlacement->nodeId = LOCAL_NODE_ID;
dummyPlacement->nodeName = LOCAL_HOST_NAME;
dummyPlacement->nodePort = PostPortNumber;
dummyPlacement->groupId = GetLocalGroupId();
return dummyPlacement;
}


/*
* CreateDummyPlacement creates a dummy placement that can be used for queries
* that don't involve any shards. The typical examples are:

@@ -2248,31 +2307,32 @@ static ShardPlacement *
CreateDummyPlacement(bool hasLocalRelation)
{
static uint32 zeroShardQueryRoundRobin = 0;

if (TaskAssignmentPolicy != TASK_ASSIGNMENT_ROUND_ROBIN || hasLocalRelation)
{
return CreateLocalDummyPlacement();
}

List *workerNodeList = ActiveReadableNonCoordinatorNodeList();
if (workerNodeList == NIL)
{
/*
* We want to round-robin over the workers, but there are no workers.
* To make sure the query can still succeed we fall back to returning
* a local dummy placement.
*/
return CreateLocalDummyPlacement();
}

int workerNodeCount = list_length(workerNodeList);
int workerNodeIndex = zeroShardQueryRoundRobin % workerNodeCount;
WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList,
workerNodeIndex);

ShardPlacement *dummyPlacement = CitusMakeNode(ShardPlacement);
SetPlacementNodeMetadata(dummyPlacement, workerNode);

if (TaskAssignmentPolicy == TASK_ASSIGNMENT_ROUND_ROBIN && !hasLocalRelation)
{
List *workerNodeList = ActiveReadableWorkerNodeList();
if (workerNodeList == NIL)
{
return NULL;
}

int workerNodeCount = list_length(workerNodeList);
int workerNodeIndex = zeroShardQueryRoundRobin % workerNodeCount;
WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList,
workerNodeIndex);
SetPlacementNodeMetadata(dummyPlacement, workerNode);

zeroShardQueryRoundRobin++;
}
else
{
dummyPlacement->nodeId = LOCAL_NODE_ID;
dummyPlacement->nodeName = LOCAL_HOST_NAME;
dummyPlacement->nodePort = PostPortNumber;
dummyPlacement->groupId = GetLocalGroupId();
}
zeroShardQueryRoundRobin++;

return dummyPlacement;
}

@@ -2654,7 +2714,6 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
Oid distributedTableId = ExtractFirstCitusTableId(query);
CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(distributedTableId);
char partitionMethod = cacheEntry->partitionMethod;
uint32 rangeTableId = 1;
List *modifyRouteList = NIL;
ListCell *insertValuesCell = NULL;

@@ -2692,7 +2751,7 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
return modifyRouteList;
}

Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
Var *partitionColumn = cacheEntry->partitionColumn;

/* get full list of insert values and iterate over them to prune */
List *insertValuesList = ExtractInsertValuesList(query, partitionColumn);

@@ -2701,8 +2760,38 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
{
InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell);
List *prunedShardIntervalList = NIL;
Expr *partitionValueExpr = (Expr *) strip_implicit_coercions(
(Node *) insertValues->partitionValueExpr);
Node *partitionValueExpr = (Node *) insertValues->partitionValueExpr;

/*
* We only support constant partition values at this point. Sometimes
* they are wrappend in an implicit coercion though. Most notably
* FuncExpr coercions for casts created with CREATE CAST ... WITH
* FUNCTION .. AS IMPLICIT. To support this first we strip them here.
* Then we do the coercion manually below using
* TransformPartitionRestrictionValue, if the types are not the same.
*
* NOTE: eval_const_expressions below would do some of these removals
* too, but it's unclear if it would do all of them. It is possible
* that there are no cases where this strip_implicit_coercions call is
* really necessary at all, but currently that's hard to rule out.
* So to be on the safe side we call strip_implicit_coercions too, to
* be sure we support as much as possible.
*/
partitionValueExpr = strip_implicit_coercions(partitionValueExpr);

/*
* By evaluating constant expressions an expression such as 2 + 4
* will become const 6. That way we can use them as a partition column
* value. Normally the planner evaluates constant expressions, but we
* may be working on the original query tree here. So we do it here
* explicitely before checking that the partition value is a const.
*
* NOTE: We do not use expression_planner here, since all it does
* apart from calling eval_const_expressions is call fix_opfuncids.
* This is not needed here, since it's a no-op for T_Const nodes and we
* error out below in all other cases.
*/
partitionValueExpr = eval_const_expressions(NULL, partitionValueExpr);

if (!IsA(partitionValueExpr, Const))
{

@@ -2719,21 +2808,20 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
"column")));
}

/* actually do the coercions that we skipped before, if fails throw an
* error */
if (partitionValueConst->consttype != partitionColumn->vartype)
{
bool missingOk = false;
partitionValueConst =
TransformPartitionRestrictionValue(partitionColumn,
partitionValueConst,
missingOk);
}

if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod ==
DISTRIBUTE_BY_RANGE)
{
Var *distributionKey = cacheEntry->partitionColumn;

/* handle coercions, if fails throw an error */
if (partitionValueConst->consttype != distributionKey->vartype)
{
bool missingOk = false;
partitionValueConst =
TransformPartitionRestrictionValue(distributionKey,
partitionValueConst,
missingOk);
}

Datum partitionValue = partitionValueConst->constvalue;

ShardInterval *shardInterval = FindShardInterval(partitionValue, cacheEntry);

@@ -168,7 +168,6 @@ static bool ShouldRecursivelyPlanSetOperation(Query *query,
RecursivePlanningContext *context);
static void RecursivelyPlanSetOperations(Query *query, Node *node,
RecursivePlanningContext *context);
static bool IsLocalTableRTE(Node *node);
static void RecursivelyPlanSubquery(Query *subquery,
RecursivePlanningContext *planningContext);
static DistributedSubPlan * CreateDistributedSubPlan(uint32 subPlanId,

@@ -1060,7 +1059,7 @@ RecursivelyPlanSetOperations(Query *query, Node *node,
* is a range table relation entry that points to a local
* relation (i.e., not a distributed relation).
*/
static bool
bool
IsLocalTableRTE(Node *node)
{
if (node == NULL)

@@ -1440,7 +1439,8 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
{
ereport(ERROR, (errmsg("bad number of tuple descriptor attributes")));
}
for (targetColumnIndex = 0; targetColumnIndex < (AttrNumber) tupleDesc->natts;
AttrNumber natts = tupleDesc->natts;
for (targetColumnIndex = 0; targetColumnIndex < natts;
targetColumnIndex++)
{
FormData_pg_attribute *attribute = TupleDescAttr(tupleDesc,

@@ -1576,6 +1576,22 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
/* setup partitionColumnValue argument once */
fcSetArg(compareFunction, 0, partitionColumnValue);

/*
* Now we test partitionColumnValue used in where clause such as
* partCol > partitionColumnValue (or partCol >= partitionColumnValue)
* against four possibilities, these are:
* 1) partitionColumnValue falls into a specific shard, such that:
* partitionColumnValue >= shard[x].min, and
* partitionColumnValue < shard[x].max (or partitionColumnValue <= shard[x].max).
* 2) partitionColumnValue < shard[x].min for all the shards
* 3) partitionColumnValue > shard[x].max for all the shards
* 4) partitionColumnValue falls in between two shards, such that:
* partitionColumnValue > shard[x].max and
* partitionColumnValue < shard[x+1].min
*
* For 1), we find that shard in below loop using binary search and
* return the index of it. For the others, see the end of this function.
*/
while (lowerBoundIndex < upperBoundIndex)
{
int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2);

@@ -1608,7 +1624,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
continue;
}

/* found interval containing partitionValue */
/* partitionColumnValue falls into a specific shard, possibility 1) */
return middleIndex;
}

@@ -1619,20 +1635,30 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
* (we'd have hit the return middleIndex; case otherwise). Figure out
* whether there's possibly any interval containing a value that's bigger
* than the partition key one.
*
* Also note that we initialized lowerBoundIndex with 0. Similarly,
* we always set it to the index of the shard that we consider as our
* lower boundary during binary search.
*/
if (lowerBoundIndex == 0)
if (lowerBoundIndex == shardCount)
{
/* all intervals are bigger, thus return 0 */
return 0;
}
else if (lowerBoundIndex == shardCount)
{
/* partition value is bigger than all partition values */
/*
* Since lowerBoundIndex is an inclusive index, being equal to shardCount
* means all the shards have smaller values than partitionColumnValue,
* which corresponds to possibility 3).
* In that case, since we can't have a lower bound shard, we return
* INVALID_SHARD_INDEX here.
*/
return INVALID_SHARD_INDEX;
}

/* value falls inbetween intervals */
return lowerBoundIndex + 1;
/*
* partitionColumnValue is either smaller than all the shards or falls in
* between two shards, which corresponds to possibility 2) or 4).
* Knowing that lowerBoundIndex is an inclusive index, we directly return
* it as the index for the lower bound shard here.
*/
return lowerBoundIndex;
}


@@ -1652,6 +1678,23 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
/* setup partitionColumnValue argument once */
fcSetArg(compareFunction, 0, partitionColumnValue);

/*
* Now we test partitionColumnValue used in where clause such as
* partCol < partitionColumnValue (or partCol <= partitionColumnValue)
* against four possibilities, these are:
* 1) partitionColumnValue falls into a specific shard, such that:
* partitionColumnValue <= shard[x].max, and
* partitionColumnValue > shard[x].min (or partitionColumnValue >= shard[x].min).
* 2) partitionColumnValue > shard[x].max for all the shards
* 3) partitionColumnValue < shard[x].min for all the shards
* 4) partitionColumnValue falls in between two shards, such that:
* partitionColumnValue > shard[x].max and
* partitionColumnValue < shard[x+1].min
*
* For 1), we find that shard in below loop using binary search and
* return the index of it. For the others, see the end of this function.
*/

while (lowerBoundIndex < upperBoundIndex)
{
int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2);

@@ -1684,7 +1727,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
continue;
}

/* found interval containing partitionValue */
/* partitionColumnValue falls into a specific shard, possibility 1) */
return middleIndex;
}

@@ -1695,19 +1738,29 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
* (we'd have hit the return middleIndex; case otherwise). Figure out
* whether there's possibly any interval containing a value that's smaller
* than the partition key one.
*
* Also note that we initialized upperBoundIndex with shardCount. Similarly,
* we always set it to the index of the next shard that we consider as our
* upper boundary during binary search.
*/
if (upperBoundIndex == shardCount)
if (upperBoundIndex == 0)
{
/* all intervals are smaller, thus return 0 */
return shardCount - 1;
}
else if (upperBoundIndex == 0)
{
/* partition value is smaller than all partition values */
/*
* Since upperBoundIndex is an exclusive index, being equal to 0 means
* all the shards have greater values than partitionColumnValue, which
* corresponds to possibility 3).
* In that case, since we can't have an upper bound shard, we return
* INVALID_SHARD_INDEX here.
*/
return INVALID_SHARD_INDEX;
}

/* value falls inbetween intervals, return the inverval one smaller as bound */
/*
* partitionColumnValue is either greater than all the shards or falls in
* between two shards, which corresponds to possibility 2) or 4).
* Knowing that upperBoundIndex is an exclusive index, we return the index
* for the previous shard here.
*/
return upperBoundIndex - 1;
}

@@ -0,0 +1,3 @@
-- 9.4-1--9.4-2 was added later as a patch to fix a bug in our PG upgrade functions
#include "udfs/citus_prepare_pg_upgrade/9.4-2.sql"
#include "udfs/citus_finish_pg_upgrade/9.4-2.sql"

@@ -0,0 +1,9 @@
--
-- 9.4-1--9.4-2 was added later as a patch to fix a bug in our PG upgrade functions
--
-- This script brings users who installed the patch released back to the 9.4-1
-- upgrade path. We do this via a semantical downgrade since there has already been
-- introduced new changes in the schema from 9.4-1 to 9.5-1. To make sure we include all
-- changes made during that version change we decide to use the existing upgrade path from
-- our later introduced 9.4-2 version.
--

@@ -0,0 +1,105 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $cppu$
DECLARE
table_name regclass;
command text;
trigger_name text;
BEGIN
--
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
-- enterprise catalog tables
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;

ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger;
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
name,
default_strategy,
shard_cost_function::regprocedure::regproc,
node_capacity_function::regprocedure::regproc,
shard_allowed_on_node_function::regprocedure::regproc,
default_threshold,
minimum_threshold
FROM public.pg_dist_rebalance_strategy;
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger;

--
-- drop backup tables
--
DROP TABLE public.pg_dist_authinfo;
DROP TABLE public.pg_dist_colocation;
DROP TABLE public.pg_dist_local_group;
DROP TABLE public.pg_dist_node;
DROP TABLE public.pg_dist_node_metadata;
DROP TABLE public.pg_dist_partition;
DROP TABLE public.pg_dist_placement;
DROP TABLE public.pg_dist_poolinfo;
DROP TABLE public.pg_dist_shard;
DROP TABLE public.pg_dist_transaction;

--
-- reset sequences
--
PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);

--
-- register triggers
--
FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition
LOOP
trigger_name := 'truncate_trigger_' || table_name::oid;
command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
EXECUTE command;
command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
EXECUTE command;
END LOOP;

--
-- set dependencies
--
INSERT INTO pg_depend
SELECT
'pg_class'::regclass::oid as classid,
p.logicalrelid::regclass::oid as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'n' as deptype
FROM pg_catalog.pg_dist_partition p;

-- restore pg_dist_object from the stable identifiers
TRUNCATE citus.pg_dist_object;
INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
SELECT
address.classid,
address.objid,
address.objsubid,
naming.distribution_argument_index,
naming.colocationid
FROM
public.pg_dist_object naming,
pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;

DROP TABLE public.pg_dist_object;
END;
$cppu$;

COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';

@@ -85,17 +85,7 @@ BEGIN
FROM pg_catalog.pg_dist_partition p;

-- restore pg_dist_object from the stable identifiers
-- DELETE/INSERT to avoid primary key violations
WITH old_records AS (
DELETE FROM
citus.pg_dist_object
RETURNING
type,
object_names,
object_args,
distribution_argument_index,
colocationid
)
TRUNCATE citus.pg_dist_object;
INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
SELECT
address.classid,

@@ -104,8 +94,10 @@ BEGIN
naming.distribution_argument_index,
naming.colocationid
FROM
old_records naming,
pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
public.pg_dist_object naming,
pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;

DROP TABLE public.pg_dist_object;
END;
$cppu$;

@@ -0,0 +1,44 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $cppu$
BEGIN
--
-- backup citus catalog tables
--
CREATE TABLE public.pg_dist_partition AS SELECT * FROM pg_catalog.pg_dist_partition;
CREATE TABLE public.pg_dist_shard AS SELECT * FROM pg_catalog.pg_dist_shard;
CREATE TABLE public.pg_dist_placement AS SELECT * FROM pg_catalog.pg_dist_placement;
CREATE TABLE public.pg_dist_node_metadata AS SELECT * FROM pg_catalog.pg_dist_node_metadata;
CREATE TABLE public.pg_dist_node AS SELECT * FROM pg_catalog.pg_dist_node;
CREATE TABLE public.pg_dist_local_group AS SELECT * FROM pg_catalog.pg_dist_local_group;
CREATE TABLE public.pg_dist_transaction AS SELECT * FROM pg_catalog.pg_dist_transaction;
CREATE TABLE public.pg_dist_colocation AS SELECT * FROM pg_catalog.pg_dist_colocation;
-- enterprise catalog tables
CREATE TABLE public.pg_dist_authinfo AS SELECT * FROM pg_catalog.pg_dist_authinfo;
CREATE TABLE public.pg_dist_poolinfo AS SELECT * FROM pg_catalog.pg_dist_poolinfo;
CREATE TABLE public.pg_dist_rebalance_strategy AS SELECT
name,
default_strategy,
shard_cost_function::regprocedure::text,
node_capacity_function::regprocedure::text,
shard_allowed_on_node_function::regprocedure::text,
default_threshold,
minimum_threshold
FROM pg_catalog.pg_dist_rebalance_strategy;

-- store upgrade stable identifiers on pg_dist_object catalog
CREATE TABLE public.pg_dist_object AS SELECT
address.type,
address.object_names,
address.object_args,
objects.distribution_argument_index,
objects.colocationid
FROM citus.pg_dist_object objects,
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
END;
$cppu$;

COMMENT ON FUNCTION pg_catalog.citus_prepare_pg_upgrade()
IS 'perform tasks to copy citus settings to a location that could later be restored after pg_upgrade is done';

@ -29,8 +29,14 @@ BEGIN
    FROM pg_catalog.pg_dist_rebalance_strategy;

    -- store upgrade stable identifiers on pg_dist_object catalog
    UPDATE citus.pg_dist_object
        SET (type, object_names, object_args) = (SELECT * FROM pg_identify_object_as_address(classid, objid, objsubid));
    CREATE TABLE public.pg_dist_object AS SELECT
        address.type,
        address.object_names,
        address.object_args,
        objects.distribution_argument_index,
        objects.colocationid
    FROM citus.pg_dist_object objects,
         pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
END;
$cppu$;
@ -217,8 +217,8 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS)
    StringInfo maxInfo = makeStringInfo();
    uint64 newShardId = GetNextShardId();

    appendStringInfo(minInfo, "%d", INT32_MIN);
    appendStringInfo(maxInfo, "%d", INT32_MAX);
    appendStringInfo(minInfo, "%d", PG_INT32_MIN);
    appendStringInfo(maxInfo, "%d", PG_INT32_MAX);

    text *minInfoText = cstring_to_text(minInfo->data);
    text *maxInfoText = cstring_to_text(maxInfo->data);
@ -75,7 +75,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
{
    uint32 timeout = PG_GETARG_UINT32(0);

    List *workerList = ActivePrimaryWorkerNodeList(NoLock);
    List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
    bool waitNotifications = false;

    WorkerNode *workerNode = NULL;
@ -0,0 +1,56 @@
/*-------------------------------------------------------------------------
 *
 * xact_stats.c
 *
 * This file contains functions to provide helper UDFs for testing transaction
 * statistics.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */
#include <sys/stat.h>
#include <unistd.h>

#include "postgres.h"
#include "funcapi.h"
#include "libpq-fe.h"
#include "miscadmin.h"
#include "pgstat.h"

static Size MemoryContextTotalSpace(MemoryContext context);

PG_FUNCTION_INFO_V1(top_transaction_context_size);

/*
 * top_transaction_context_size returns current size of TopTransactionContext.
 */
Datum
top_transaction_context_size(PG_FUNCTION_ARGS)
{
    Size totalSpace = MemoryContextTotalSpace(TopTransactionContext);
    PG_RETURN_INT64(totalSpace);
}


/*
 * MemoryContextTotalSpace returns total space allocated in context and its children.
 */
static Size
MemoryContextTotalSpace(MemoryContext context)
{
    Size totalSpace = 0;

    MemoryContextCounters totals = { 0 };
    TopTransactionContext->methods->stats(TopTransactionContext, NULL, NULL, &totals);
    totalSpace += totals.totalspace;

    for (MemoryContext child = context->firstchild;
         child != NULL;
         child = child->nextchild)
    {
        totalSpace += MemoryContextTotalSpace(child);
    }

    return totalSpace;
}
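The helper above always asks TopTransactionContext for its counters before recursing. As a hedged aside (not part of this commit), a generic variant for an arbitrary context tree would pass the visited context to the same four-argument stats callback before descending into its children:

```c
/*
 * Illustrative sketch only: total space of an arbitrary context tree,
 * using the same stats-callback pattern as MemoryContextTotalSpace above.
 */
static Size
ContextTreeTotalSpace(MemoryContext context)
{
    MemoryContextCounters totals = { 0 };

    /* ask the visited context (not TopTransactionContext) for its counters */
    context->methods->stats(context, NULL, NULL, &totals);

    Size totalSpace = totals.totalspace;

    for (MemoryContext child = context->firstchild;
         child != NULL;
         child = child->nextchild)
    {
        totalSpace += ContextTreeTotalSpace(child);
    }

    return totalSpace;
}
```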
@ -217,7 +217,7 @@ Datum
get_global_active_transactions(PG_FUNCTION_ARGS)
{
    TupleDesc tupleDescriptor = NULL;
    List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
    List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
    List *connectionList = NIL;
    StringInfo queryToSend = makeStringInfo();

@ -311,7 +311,7 @@ citus_worker_stat_activity(PG_FUNCTION_ARGS)
static List *
CitusStatActivity(const char *statQuery)
{
    List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
    List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
    List *connectionList = NIL;

    /*

@ -437,7 +437,7 @@ GetLocalNodeCitusDistStat(const char *statQuery)
    int32 localGroupId = GetLocalGroupId();

    /* get the current worker's node stats */
    List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
    List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
    WorkerNode *workerNode = NULL;
    foreach_ptr(workerNode, workerNodeList)
    {
@ -593,7 +593,17 @@ AdjustMaxPreparedTransactions(void)
static void
PushSubXact(SubTransactionId subId)
{
    MemoryContext old_context = MemoryContextSwitchTo(CurTransactionContext);
    /*
     * We need to allocate these in TopTransactionContext instead of current
     * subxact's memory context. This is because AtSubCommit_Memory won't
     * delete the subxact's memory context unless it is empty, and this
     * can cause memory leaks. For emptiness it just checks if the memory
     * has been reset, and we cannot reset the subxact context since other
     * data can be in the context that are needed by upper commits.
     *
     * See https://github.com/citusdata/citus/issues/3999
     */
    MemoryContext old_context = MemoryContextSwitchTo(TopTransactionContext);

    /* save provided subId as well as propagated SET LOCAL stmts */
    SubXactContext *state = palloc(sizeof(SubXactContext));

@ -612,19 +622,34 @@ PushSubXact(SubTransactionId subId)
static void
PopSubXact(SubTransactionId subId)
{
    MemoryContext old_context = MemoryContextSwitchTo(CurTransactionContext);
    SubXactContext *state = linitial(activeSubXactContexts);

    /*
     * the previous activeSetStmts is already invalid because it's in the now-
     * aborted subxact (what we're popping), so no need to free before assign-
     * ing with the setLocalCmds of the popped context
     */
    Assert(state->subId == subId);
    activeSetStmts = state->setLocalCmds;
    activeSubXactContexts = list_delete_first(activeSubXactContexts);

    MemoryContextSwitchTo(old_context);
    /*
     * Free activeSetStmts to avoid memory leaks when we create subxacts
     * for each row, e.g. in exception handling of UDFs.
     */
    if (activeSetStmts != NULL)
    {
        pfree(activeSetStmts->data);
        pfree(activeSetStmts);
    }

    /*
     * SET LOCAL commands are local to subxact blocks. When a subxact commits
     * or rolls back, we should roll back our set of SET LOCAL commands to the
     * ones we had in the upper commit.
     */
    activeSetStmts = state->setLocalCmds;

    /*
     * Free state to avoid memory leaks when we create subxacts for each row,
     * e.g. in exception handling of UDFs.
     */
    pfree(state);

    activeSubXactContexts = list_delete_first(activeSubXactContexts);
}
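The comment in PushSubXact above carries the core of the fix: per-subtransaction bookkeeping now lives in TopTransactionContext, because AtSubCommit_Memory only reclaims a subtransaction's memory context when it is empty. A minimal sketch of that allocation pattern follows; PushedState, pushedStates and RememberSubXact are hypothetical names for illustration and are not part of this diff.

```c
/*
 * Sketch, assuming the standard PostgreSQL memory-context API:
 * allocate bookkeeping that must outlive the subtransaction in
 * TopTransactionContext rather than CurTransactionContext.
 */
#include "postgres.h"
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "utils/memutils.h"

typedef struct PushedState
{
    SubTransactionId subId;
    StringInfo setLocalCmds;
} PushedState;

static List *pushedStates = NIL;

static void
RememberSubXact(SubTransactionId subId, StringInfo setLocalCmds)
{
    MemoryContext oldContext = MemoryContextSwitchTo(TopTransactionContext);

    PushedState *state = palloc(sizeof(PushedState));
    state->subId = subId;
    state->setLocalCmds = setLocalCmds;

    /* the list cells are allocated in TopTransactionContext as well */
    pushedStates = lcons(state, pushedStates);

    MemoryContextSwitchTo(oldContext);
}
```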
@ -36,6 +36,7 @@
#include "distributed/metadata_cache.h"
#include "distributed/pg_dist_transaction.h"
#include "distributed/remote_commands.h"
#include "distributed/resource_lock.h"
#include "distributed/transaction_recovery.h"
#include "distributed/worker_manager.h"
#include "distributed/version_compat.h"

@ -118,6 +119,9 @@ RecoverTwoPhaseCommits(void)
{
    int recoveredTransactionCount = 0;

    /* take advisory lock first to avoid running concurrently */
    LockTransactionRecovery(ShareUpdateExclusiveLock);

    List *workerList = ActivePrimaryNodeList(NoLock);
    WorkerNode *workerNode = NULL;
    foreach_ptr(workerNode, workerList)

@ -172,7 +176,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode)

    /* take table lock first to avoid running concurrently */
    Relation pgDistTransaction = heap_open(DistTransactionRelationId(),
                                           ShareUpdateExclusiveLock);
                                           RowExclusiveLock);
    TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction);

    /*
@ -156,7 +156,7 @@ static void
|
|||
SendCommandListToAllWorkersInternal(List *commandList, bool failOnError, const
|
||||
char *superuser)
|
||||
{
|
||||
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
|
||||
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
|
||||
|
||||
WorkerNode *workerNode = NULL;
|
||||
foreach_ptr(workerNode, workerNodeList)
|
||||
|
@ -198,19 +198,21 @@ SendOptionalCommandListToAllWorkers(List *commandList, const char *superuser)
|
|||
List *
|
||||
TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
|
||||
{
|
||||
List *workerNodeList = ActivePrimaryWorkerNodeList(lockMode);
|
||||
List *workerNodeList = NIL;
|
||||
if (targetWorkerSet == ALL_SHARD_NODES)
|
||||
{
|
||||
workerNodeList = ActivePrimaryNodeList(lockMode);
|
||||
}
|
||||
else
|
||||
{
|
||||
workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode);
|
||||
}
|
||||
List *result = NIL;
|
||||
|
||||
int32 localGroupId = GetLocalGroupId();
|
||||
|
||||
WorkerNode *workerNode = NULL;
|
||||
foreach_ptr(workerNode, workerNodeList)
|
||||
{
|
||||
if (targetWorkerSet == WORKERS_WITH_METADATA && !workerNode->hasMetadata)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (targetWorkerSet == OTHER_WORKERS && workerNode->groupId == localGroupId)
|
||||
if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES && !workerNode->hasMetadata)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
@ -232,7 +234,7 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
|
|||
void
|
||||
SendBareCommandListToMetadataWorkers(List *commandList)
|
||||
{
|
||||
TargetWorkerSet targetWorkerSet = WORKERS_WITH_METADATA;
|
||||
TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES;
|
||||
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
|
||||
char *nodeUser = CitusExtensionOwnerName();
|
||||
|
||||
|
@ -271,7 +273,7 @@ SendBareCommandListToMetadataWorkers(List *commandList)
|
|||
int
|
||||
SendBareOptionalCommandListToAllWorkersAsUser(List *commandList, const char *user)
|
||||
{
|
||||
TargetWorkerSet targetWorkerSet = ALL_WORKERS;
|
||||
TargetWorkerSet targetWorkerSet = NON_COORDINATOR_NODES;
|
||||
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
|
||||
int maxError = RESPONSE_OKAY;
|
||||
|
||||
|
@ -318,11 +320,12 @@ SendCommandToMetadataWorkersParams(const char *command,
|
|||
const Oid *parameterTypes,
|
||||
const char *const *parameterValues)
|
||||
{
|
||||
List *workerNodeList = TargetWorkerSetNodeList(WORKERS_WITH_METADATA, ShareLock);
|
||||
List *workerNodeList = TargetWorkerSetNodeList(NON_COORDINATOR_METADATA_NODES,
|
||||
ShareLock);
|
||||
|
||||
ErrorIfAnyMetadataNodeOutOfSync(workerNodeList);
|
||||
|
||||
SendCommandToWorkersParamsInternal(WORKERS_WITH_METADATA, command, user,
|
||||
SendCommandToWorkersParamsInternal(NON_COORDINATOR_METADATA_NODES, command, user,
|
||||
parameterCount, parameterTypes,
|
||||
parameterValues);
|
||||
}
|
||||
|
|
|
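With the renamed TargetWorkerSet values above, a caller now states explicitly whether the coordinator takes part in an operation. A hedged usage sketch; SendPingToNodes and the DEBUG message are illustrative and not from this diff.

```c
/*
 * Sketch of selecting a node set with the renamed API: either all nodes
 * that can hold shards, or only the non-coordinator nodes.
 */
#include "postgres.h"
#include "distributed/listutils.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_transaction.h"
#include "storage/lockdefs.h"

static void
SendPingToNodes(bool includeCoordinator)
{
    TargetWorkerSet targetSet =
        includeCoordinator ? ALL_SHARD_NODES : NON_COORDINATOR_NODES;
    List *nodeList = TargetWorkerSetNodeList(targetSet, ShareLock);

    WorkerNode *workerNode = NULL;
    foreach_ptr(workerNode, nodeList)
    {
        /* a real caller would open connections here instead of logging */
        elog(DEBUG1, "would contact %s:%d", workerNode->workerName,
             (int) workerNode->workerPort);
    }
}
```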
@ -34,22 +34,23 @@
|
|||
static bool IsVariableExpression(Node *node);
|
||||
static Expr * citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
|
||||
Oid result_collation,
|
||||
MasterEvaluationContext *masterEvaluationContext);
|
||||
CoordinatorEvaluationContext *
|
||||
coordinatorEvaluationContext);
|
||||
static bool CitusIsVolatileFunctionIdChecker(Oid func_id, void *context);
|
||||
static bool CitusIsMutableFunctionIdChecker(Oid func_id, void *context);
|
||||
static bool ShouldEvaluateExpression(Expr *expression);
|
||||
static bool ShouldEvaluateFunctionWithMasterContext(MasterEvaluationContext *
|
||||
evaluationContext);
|
||||
static bool ShouldEvaluateFunctions(CoordinatorEvaluationContext *evaluationContext);
|
||||
static void FixFunctionArguments(Node *expr);
|
||||
static bool FixFunctionArgumentsWalker(Node *expr, void *context);
|
||||
|
||||
|
||||
/*
|
||||
* RequiresMasterEvaluation returns the executor needs to reparse and
|
||||
* RequiresCoordinatorEvaluation returns the executor needs to reparse and
|
||||
* try to execute this query, which is the case if the query contains
|
||||
* any stable or volatile function.
|
||||
*/
|
||||
bool
|
||||
RequiresMasterEvaluation(Query *query)
|
||||
RequiresCoordinatorEvaluation(Query *query)
|
||||
{
|
||||
if (query->commandType == CMD_SELECT && !query->hasModifyingCTE)
|
||||
{
|
||||
|
@ -61,25 +62,25 @@ RequiresMasterEvaluation(Query *query)
|
|||
|
||||
|
||||
/*
|
||||
* ExecuteMasterEvaluableExpressions evaluates expressions and parameters
|
||||
* ExecuteCoordinatorEvaluableExpressions evaluates expressions and parameters
|
||||
* that can be resolved to a constant.
|
||||
*/
|
||||
void
|
||||
ExecuteMasterEvaluableExpressions(Query *query, PlanState *planState)
|
||||
ExecuteCoordinatorEvaluableExpressions(Query *query, PlanState *planState)
|
||||
{
|
||||
MasterEvaluationContext masterEvaluationContext;
|
||||
CoordinatorEvaluationContext coordinatorEvaluationContext;
|
||||
|
||||
masterEvaluationContext.planState = planState;
|
||||
coordinatorEvaluationContext.planState = planState;
|
||||
if (query->commandType == CMD_SELECT)
|
||||
{
|
||||
masterEvaluationContext.evaluationMode = EVALUATE_PARAMS;
|
||||
coordinatorEvaluationContext.evaluationMode = EVALUATE_PARAMS;
|
||||
}
|
||||
else
|
||||
{
|
||||
masterEvaluationContext.evaluationMode = EVALUATE_FUNCTIONS_PARAMS;
|
||||
coordinatorEvaluationContext.evaluationMode = EVALUATE_FUNCTIONS_PARAMS;
|
||||
}
|
||||
|
||||
PartiallyEvaluateExpression((Node *) query, &masterEvaluationContext);
|
||||
PartiallyEvaluateExpression((Node *) query, &coordinatorEvaluationContext);
|
||||
}
|
||||
|
||||
|
||||
|
@ -91,7 +92,7 @@ ExecuteMasterEvaluableExpressions(Query *query, PlanState *planState)
|
|||
*/
|
||||
Node *
|
||||
PartiallyEvaluateExpression(Node *expression,
|
||||
MasterEvaluationContext *masterEvaluationContext)
|
||||
CoordinatorEvaluationContext *coordinatorEvaluationContext)
|
||||
{
|
||||
if (expression == NULL || IsA(expression, Const))
|
||||
{
|
||||
|
@ -112,11 +113,45 @@ PartiallyEvaluateExpression(Node *expression,
|
|||
exprType(expression),
|
||||
exprTypmod(expression),
|
||||
exprCollation(expression),
|
||||
masterEvaluationContext);
|
||||
coordinatorEvaluationContext);
|
||||
}
|
||||
else if (ShouldEvaluateExpression((Expr *) expression) &&
|
||||
ShouldEvaluateFunctionWithMasterContext(masterEvaluationContext))
|
||||
ShouldEvaluateFunctions(coordinatorEvaluationContext))
|
||||
{
|
||||
/*
|
||||
* The planner normally evaluates constant expressions, but we may be
|
||||
* working on the original query tree. We could rely on
|
||||
* citus_evaluate_expr to evaluate constant expressions, but there are
|
||||
* certain node types that citus_evaluate_expr does not expect because
|
||||
* the planner normally replaces them (in particular, CollateExpr).
|
||||
* Hence, we first evaluate constant expressions using
|
||||
* eval_const_expressions before continuing.
|
||||
*
|
||||
* NOTE: We do not use expression_planner here, since all it does
|
||||
* apart from calling eval_const_expressions is call fix_opfuncids.
|
||||
* We do not need this, since that is already called in
|
||||
* citus_evaluate_expr. So we won't needlessly traverse the expression
|
||||
* tree by calling it another time.
|
||||
*/
|
||||
expression = eval_const_expressions(NULL, expression);
|
||||
|
||||
/*
|
||||
* It's possible that after evaluating const expressions we
|
||||
* actually don't need to evaluate this expression anymore e.g:
|
||||
*
|
||||
* 1 = 0 AND now() > timestamp '10-10-2000 00:00'
|
||||
*
|
||||
* This statement would simply resolve to false, because 1 = 0 is
|
||||
* false. That's why we now check again if we should evaluate the
|
||||
* expression and only continue if we still do.
|
||||
*/
|
||||
if (!ShouldEvaluateExpression((Expr *) expression))
|
||||
{
|
||||
return (Node *) expression_tree_mutator(expression,
|
||||
PartiallyEvaluateExpression,
|
||||
coordinatorEvaluationContext);
|
||||
}
|
||||
|
||||
if (FindNodeCheck(expression, IsVariableExpression))
|
||||
{
|
||||
/*
|
||||
|
@ -132,19 +167,19 @@ PartiallyEvaluateExpression(Node *expression,
|
|||
*/
|
||||
return (Node *) expression_tree_mutator(expression,
|
||||
PartiallyEvaluateExpression,
|
||||
masterEvaluationContext);
|
||||
coordinatorEvaluationContext);
|
||||
}
|
||||
|
||||
return (Node *) citus_evaluate_expr((Expr *) expression,
|
||||
exprType(expression),
|
||||
exprTypmod(expression),
|
||||
exprCollation(expression),
|
||||
masterEvaluationContext);
|
||||
coordinatorEvaluationContext);
|
||||
}
|
||||
else if (nodeTag == T_Query)
|
||||
{
|
||||
Query *query = (Query *) expression;
|
||||
MasterEvaluationContext subContext = *masterEvaluationContext;
|
||||
CoordinatorEvaluationContext subContext = *coordinatorEvaluationContext;
|
||||
if (query->commandType != CMD_SELECT)
|
||||
{
|
||||
/*
|
||||
|
@ -165,7 +200,7 @@ PartiallyEvaluateExpression(Node *expression,
|
|||
{
|
||||
return (Node *) expression_tree_mutator(expression,
|
||||
PartiallyEvaluateExpression,
|
||||
masterEvaluationContext);
|
||||
coordinatorEvaluationContext);
|
||||
}
|
||||
|
||||
return expression;
|
||||
|
@ -173,12 +208,12 @@ PartiallyEvaluateExpression(Node *expression,
|
|||
|
||||
|
||||
/*
|
||||
* ShouldEvaluateFunctionWithMasterContext is a helper function which is used to
|
||||
* ShouldEvaluateFunctions is a helper function which is used to
|
||||
* decide whether the function/expression should be evaluated with the input
|
||||
* masterEvaluationContext.
|
||||
* coordinatorEvaluationContext.
|
||||
*/
|
||||
static bool
|
||||
ShouldEvaluateFunctionWithMasterContext(MasterEvaluationContext *evaluationContext)
|
||||
ShouldEvaluateFunctions(CoordinatorEvaluationContext *evaluationContext)
|
||||
{
|
||||
if (evaluationContext == NULL)
|
||||
{
|
||||
|
@ -269,7 +304,7 @@ IsVariableExpression(Node *node)
|
|||
static Expr *
|
||||
citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
|
||||
Oid result_collation,
|
||||
MasterEvaluationContext *masterEvaluationContext)
|
||||
CoordinatorEvaluationContext *coordinatorEvaluationContext)
|
||||
{
|
||||
PlanState *planState = NULL;
|
||||
EState *estate;
|
||||
|
@ -280,19 +315,19 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
|
|||
int16 resultTypLen;
|
||||
bool resultTypByVal;
|
||||
|
||||
if (masterEvaluationContext)
|
||||
if (coordinatorEvaluationContext)
|
||||
{
|
||||
planState = masterEvaluationContext->planState;
|
||||
planState = coordinatorEvaluationContext->planState;
|
||||
|
||||
if (IsA(expr, Param))
|
||||
{
|
||||
if (masterEvaluationContext->evaluationMode == EVALUATE_NONE)
|
||||
if (coordinatorEvaluationContext->evaluationMode == EVALUATE_NONE)
|
||||
{
|
||||
/* bail out, the caller doesn't want params to be evaluated */
|
||||
return expr;
|
||||
}
|
||||
}
|
||||
else if (masterEvaluationContext->evaluationMode != EVALUATE_FUNCTIONS_PARAMS)
|
||||
else if (coordinatorEvaluationContext->evaluationMode != EVALUATE_FUNCTIONS_PARAMS)
|
||||
{
|
||||
/* should only get here for node types we should evaluate */
|
||||
Assert(ShouldEvaluateExpression(expr));
|
||||
|
|
|
@ -96,7 +96,7 @@ copyJobInfo(Job *newnode, Job *from)
|
|||
COPY_NODE_FIELD(taskList);
|
||||
COPY_NODE_FIELD(dependentJobList);
|
||||
COPY_SCALAR_FIELD(subqueryPushdown);
|
||||
COPY_SCALAR_FIELD(requiresMasterEvaluation);
|
||||
COPY_SCALAR_FIELD(requiresCoordinatorEvaluation);
|
||||
COPY_SCALAR_FIELD(deferredPruning);
|
||||
COPY_NODE_FIELD(partitionKeyValue);
|
||||
COPY_NODE_FIELD(localPlannedStatements);
|
||||
|
|
|
@ -340,7 +340,7 @@ OutJobFields(StringInfo str, const Job *node)
|
|||
WRITE_NODE_FIELD(taskList);
|
||||
WRITE_NODE_FIELD(dependentJobList);
|
||||
WRITE_BOOL_FIELD(subqueryPushdown);
|
||||
WRITE_BOOL_FIELD(requiresMasterEvaluation);
|
||||
WRITE_BOOL_FIELD(requiresCoordinatorEvaluation);
|
||||
WRITE_BOOL_FIELD(deferredPruning);
|
||||
WRITE_NODE_FIELD(partitionKeyValue);
|
||||
WRITE_NODE_FIELD(localPlannedStatements);
|
||||
|
|
|
@ -367,8 +367,8 @@ CreateCertificate(EVP_PKEY *privateKey)
     * Postgres does not check the validity on the certificates, but we can't omit the
     * dates either to create a certificate that can be parsed. We settled on a validity
     * of 0 seconds. When postgres would fix the validity check in a future version it
     * would fail right after an upgrade instead of setting a time bomb till certificate
     * expiration date.
     * would fail right after an upgrade, instead of working until the certificate
     * expiration date and then suddenly erroring out.
     */
    X509_gmtime_adj(X509_get_notBefore(certificate), 0);
    X509_gmtime_adj(X509_get_notAfter(certificate), 0);
|
||||
|
|
|
@ -104,6 +104,9 @@ static HTAB *MaintenanceDaemonDBHash;
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t got_SIGTERM = false;

/* set to true when becoming a maintenance daemon */
static bool IsMaintenanceDaemon = false;

static void MaintenanceDaemonSigTermHandler(SIGNAL_ARGS);
static void MaintenanceDaemonSigHupHandler(SIGNAL_ARGS);
static size_t MaintenanceDaemonShmemSize(void);
|
||||
|
@ -160,15 +163,31 @@ InitializeMaintenanceDaemonBackend(void)
        return;
    }

    /* maintenance daemon can ignore itself */
    if (dbData->workerPid == MyProcPid)
    if (!found)
    {
        /* ensure the values in MaintenanceDaemonDBData are zero */
        memset(((char *) dbData) + sizeof(Oid), 0,
               sizeof(MaintenanceDaemonDBData) - sizeof(Oid));
    }

    if (IsMaintenanceDaemon)
    {
        /*
         * InitializeMaintenanceDaemonBackend is called by the maintenance daemon
         * itself. In that case, we clearly don't need to start another maintenance
         * daemon.
         */
        Assert(found);
        Assert(dbData->workerPid == MyProcPid);

        LWLockRelease(&MaintenanceDaemonControl->lock);
        return;
    }

    if (!found || !dbData->daemonStarted)
    {
        Assert(dbData->workerPid == 0);

        BackgroundWorker worker;
        BackgroundWorkerHandle *handle = NULL;
|
||||
|
||||
|
@ -287,13 +306,33 @@ CitusMaintenanceDaemonMain(Datum main_arg)
        proc_exit(0);
    }

    if (myDbData->workerPid != 0)
    {
        /*
         * Another maintenance daemon is running. This usually happens because
         * postgres restarts the daemon after a non-zero exit, and
         * InitializeMaintenanceDaemonBackend started one before postgres did.
         * In that case, the first one stays and the last one exits.
         */

        proc_exit(0);
    }

    before_shmem_exit(MaintenanceDaemonShmemExit, main_arg);

    Assert(myDbData->workerPid == 0);

    /* from this point, DROP DATABASE will attempt to kill the worker */
    /*
     * Signal that I am the maintenance daemon now.
     *
     * From this point, DROP DATABASE/EXTENSION will send a SIGTERM to me.
     */
    myDbData->workerPid = MyProcPid;

    /*
     * Signal that we are running. This is mainly needed in case of restart after
     * an error, otherwise the daemonStarted flag is already true.
     */
    myDbData->daemonStarted = true;

    /* wire up signals */
    pqsignal(SIGTERM, MaintenanceDaemonSigTermHandler);
    pqsignal(SIGHUP, MaintenanceDaemonSigHupHandler);

@ -301,6 +340,8 @@ CitusMaintenanceDaemonMain(Datum main_arg)

    myDbData->latch = MyLatch;

    IsMaintenanceDaemon = true;

    LWLockRelease(&MaintenanceDaemonControl->lock);
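The comments above describe a "first registrant wins" scheme: whichever process writes its PID into the shared entry under the control LWLock becomes the maintenance daemon, and any duplicate simply exits. A hedged sketch of that pattern, with illustrative names (DaemonSlot, RegisterAsDaemonOrExit) that are not part of this commit:

```c
/*
 * Sketch only: register as the single per-database daemon, or exit if
 * another process already registered itself.
 */
#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"

typedef struct DaemonSlot
{
    int workerPid;        /* 0 when no daemon is registered */
    bool daemonStarted;
} DaemonSlot;

static void
RegisterAsDaemonOrExit(DaemonSlot *slot, LWLock *lock)
{
    LWLockAcquire(lock, LW_EXCLUSIVE);

    if (slot->workerPid != 0)
    {
        /* another daemon already registered itself; the newcomer exits */
        LWLockRelease(lock);
        proc_exit(0);
    }

    /* from here on, DROP DATABASE/EXTENSION can signal this process */
    slot->workerPid = MyProcPid;
    slot->daemonStarted = true;

    LWLockRelease(lock);
}
```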
|
||||
|
||||
/*
|
||||
|
@ -334,8 +375,6 @@ CitusMaintenanceDaemonMain(Datum main_arg)
|
|||
|
||||
CHECK_FOR_INTERRUPTS();
|
||||
|
||||
Assert(myDbData->workerPid == MyProcPid);
|
||||
|
||||
CitusTableCacheFlushInvalidatedEntries();
|
||||
|
||||
/*
|
||||
|
@ -562,15 +601,6 @@ CitusMaintenanceDaemonMain(Datum main_arg)
|
|||
/* check for changed configuration */
|
||||
if (myDbData->userOid != GetSessionUserId())
|
||||
{
|
||||
/*
|
||||
* Reset myDbData->daemonStarted so InitializeMaintenanceDaemonBackend()
|
||||
* notices this is a restart.
|
||||
*/
|
||||
LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE);
|
||||
myDbData->daemonStarted = false;
|
||||
myDbData->workerPid = 0;
|
||||
LWLockRelease(&MaintenanceDaemonControl->lock);
|
||||
|
||||
/* return code of 1 requests worker restart */
|
||||
proc_exit(1);
|
||||
}
|
||||
|
@ -682,8 +712,15 @@ MaintenanceDaemonShmemExit(int code, Datum arg)
|
|||
MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *)
|
||||
hash_search(MaintenanceDaemonDBHash, &databaseOid,
|
||||
HASH_FIND, NULL);
|
||||
if (myDbData && myDbData->workerPid == MyProcPid)
|
||||
|
||||
/* myDbData is NULL after StopMaintenanceDaemon */
|
||||
if (myDbData != NULL)
|
||||
{
|
||||
/*
|
||||
* Confirm that I am still the registered maintenance daemon before exiting.
|
||||
*/
|
||||
Assert(myDbData->workerPid == MyProcPid);
|
||||
|
||||
myDbData->daemonStarted = false;
|
||||
myDbData->workerPid = 0;
|
||||
}
|
||||
|
|
|
@ -230,7 +230,7 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList)
|
|||
static bool
|
||||
IsFirstWorkerNode()
|
||||
{
|
||||
List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
|
||||
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock);
|
||||
|
||||
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
|
||||
|
||||
|
@ -543,6 +543,20 @@ UnlockShardResource(uint64 shardId, LOCKMODE lockmode)
}


/* LockTransactionRecovery acquires a lock for transaction recovery */
void
LockTransactionRecovery(LOCKMODE lockmode)
{
    LOCKTAG tag;
    const bool sessionLock = false;
    const bool dontWait = false;

    SET_LOCKTAG_CITUS_OPERATION(tag, CITUS_TRANSACTION_RECOVERY);

    (void) LockAcquire(&tag, lockmode, sessionLock, dontWait);
}
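A hedged sketch of how a caller serializes itself against concurrent transaction recovery, mirroring the RecoverTwoPhaseCommits() call site earlier in this diff. ShareUpdateExclusiveLock conflicts with itself, so only one backend per database performs recovery at a time; RunRecoveryOnce is an illustrative name, not part of the commit.

```c
/*
 * Sketch: take the per-database advisory lock before scanning
 * pg_dist_transaction, then rely on transaction end to release it.
 */
#include "postgres.h"
#include "distributed/resource_lock.h"
#include "storage/lockdefs.h"

static void
RunRecoveryOnce(void)
{
    /* blocks until any concurrent recovery in this database finishes */
    LockTransactionRecovery(ShareUpdateExclusiveLock);

    /* ... resolve prepared transactions here ... */

    /* heavyweight locks acquired this way are released at transaction end */
}
```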

/*
 * LockJobResource acquires a lock for creating resources associated with the
 * given jobId. This resource is typically a job schema (namespace), and less
@ -307,7 +307,7 @@ FindShardInterval(Datum partitionColumnValue, CitusTableCacheEntry *cacheEntry)
 * INVALID_SHARD_INDEX is returned). This should only happen if something is
 * terribly wrong, either metadata tables are corrupted or we have a bug
 * somewhere. Such as a hash function which returns a value not in the range
 * of [INT32_MIN, INT32_MAX] can fire this.
 * of [PG_INT32_MIN, PG_INT32_MAX] can fire this.
 */
int
FindShardIntervalIndex(Datum searchedValue, CitusTableCacheEntry *cacheEntry)

@ -348,20 +348,8 @@ FindShardIntervalIndex(Datum searchedValue, CitusTableCacheEntry *cacheEntry)
    else
    {
        int hashedValue = DatumGetInt32(searchedValue);
        uint64 hashTokenIncrement = HASH_TOKEN_COUNT / shardCount;

        shardIndex = (uint32) (hashedValue - INT32_MIN) / hashTokenIncrement;
        Assert(shardIndex <= shardCount);

        /*
         * If the shard count is not power of 2, the range of the last
         * shard becomes larger than others. For that extra piece of range,
         * we still need to use the last shard.
         */
        if (shardIndex == shardCount)
        {
            shardIndex = shardCount - 1;
        }
        shardIndex = CalculateUniformHashRangeIndex(hashedValue, shardCount);
    }
}
else if (partitionMethod == DISTRIBUTE_BY_NONE)

@ -442,6 +430,48 @@ SearchCachedShardInterval(Datum partitionColumnValue, ShardInterval **shardInter
}


/*
 * CalculateUniformHashRangeIndex returns the index of the hash range in
 * which hashedValue falls, assuming shardCount uniform hash ranges.
 *
 * We use 64-bit integers to avoid overflow issues during arithmetic.
 *
 * NOTE: This function is ONLY for hash-distributed tables with uniform
 * hash ranges.
 */
int
CalculateUniformHashRangeIndex(int hashedValue, int shardCount)
{
    int64 hashedValue64 = (int64) hashedValue;

    /* normalize to the 0-UINT32_MAX range */
    int64 normalizedHashValue = hashedValue64 - PG_INT32_MIN;

    /* size of each hash range */
    int64 hashRangeSize = HASH_TOKEN_COUNT / shardCount;

    /* index of hash range into which the hash value falls */
    int shardIndex = (int) (normalizedHashValue / hashRangeSize);

    if (shardIndex < 0 || shardIndex > shardCount)
    {
        ereport(ERROR, (errmsg("bug: shard index %d out of bounds", shardIndex)));
    }

    /*
     * If the shard count is not power of 2, the range of the last
     * shard becomes larger than others. For that extra piece of range,
     * we still need to use the last shard.
     */
    if (shardIndex == shardCount)
    {
        shardIndex = shardCount - 1;
    }

    return shardIndex;
}
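CalculateUniformHashRangeIndex above is the arithmetic that replaces the open-coded computation in FindShardIntervalIndex. A small standalone sketch of the same math, assuming HASH_TOKEN_COUNT is 2^32 (the full int32 hash space), which this excerpt does not show:

```c
/* Standalone illustration of the uniform hash-range arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define HASH_TOKEN_COUNT (UINT64_C(1) << 32) /* assumed value */

static int
UniformHashRangeIndex(int32_t hashedValue, int shardCount)
{
    /* normalize to 0..UINT32_MAX, then divide by the per-shard range size */
    int64_t normalized = (int64_t) hashedValue - INT32_MIN;
    int64_t rangeSize = (int64_t) (HASH_TOKEN_COUNT / shardCount);
    int shardIndex = (int) (normalized / rangeSize);

    /* the last shard absorbs the remainder when shardCount is not a power of 2 */
    return shardIndex == shardCount ? shardCount - 1 : shardIndex;
}

int
main(void)
{
    printf("%d\n", UniformHashRangeIndex(0, 3));         /* 1 */
    printf("%d\n", UniformHashRangeIndex(INT32_MIN, 3)); /* 0 */
    printf("%d\n", UniformHashRangeIndex(INT32_MAX, 3)); /* 2 (clamped from 3) */
    return 0;
}
```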

/*
 * SingleReplicatedTable checks whether all shards of a distributed table, do not have
 * more than one replica. If even one shard has more than one replica, this function

@ -454,7 +484,7 @@ SingleReplicatedTable(Oid relationId)
    List *shardPlacementList = NIL;

    /* we could have append/range distributed tables without shards */
    if (list_length(shardList) <= 1)
    if (list_length(shardList) == 0)
    {
        return false;
    }
|
||||
|
|
|
@ -96,7 +96,7 @@ CollectBasicUsageStatistics(void)
|
|||
distTableOids = DistTableOidList();
|
||||
roundedDistTableCount = NextPow2(list_length(distTableOids));
|
||||
roundedClusterSize = NextPow2(DistributedTablesSize(distTableOids));
|
||||
workerNodeCount = ActivePrimaryWorkerNodeCount();
|
||||
workerNodeCount = ActivePrimaryNonCoordinatorNodeCount();
|
||||
metadataJsonbDatum = DistNodeMetadata();
|
||||
metadataJsonbStr = DatumGetCString(DirectFunctionCall1(jsonb_out,
|
||||
metadataJsonbDatum));
|
||||
|
|
|
@ -250,7 +250,7 @@ worker_hash_partition_table(PG_FUNCTION_ARGS)
|
|||
static ShardInterval **
|
||||
SyntheticShardIntervalArrayForShardMinValues(Datum *shardMinValues, int shardCount)
|
||||
{
|
||||
Datum nextShardMaxValue = Int32GetDatum(INT32_MAX);
|
||||
Datum nextShardMaxValue = Int32GetDatum(PG_INT32_MAX);
|
||||
ShardInterval **syntheticShardIntervalArray =
|
||||
palloc(sizeof(ShardInterval *) * shardCount);
|
||||
|
||||
|
@ -780,7 +780,12 @@ CitusRemoveDirectory(const char *filename)
|
|||
/* we now have an empty directory or a regular file, remove it */
|
||||
if (S_ISDIR(fileStat.st_mode))
|
||||
{
|
||||
removed = rmdir(filename);
|
||||
/*
|
||||
* We ignore the TOCTUO race condition static analysis warning
|
||||
* here, since we don't actually read the files or directories. We
|
||||
* simply want to remove them.
|
||||
*/
|
||||
removed = rmdir(filename); /* lgtm[cpp/toctou-race-condition] */
|
||||
|
||||
if (errno == ENOTEMPTY || errno == EEXIST)
|
||||
{
|
||||
|
@ -789,7 +794,12 @@ CitusRemoveDirectory(const char *filename)
|
|||
}
|
||||
else
|
||||
{
|
||||
removed = unlink(filename);
|
||||
/*
|
||||
* We ignore the TOCTUO race condition static analysis warning
|
||||
* here, since we don't actually read the files or directories. We
|
||||
* simply want to remove them.
|
||||
*/
|
||||
removed = unlink(filename); /* lgtm[cpp/toctou-race-condition] */
|
||||
}
|
||||
|
||||
if (removed != 0 && errno != ENOENT)
|
||||
|
@ -1240,7 +1250,6 @@ HashPartitionId(Datum partitionValue, Oid partitionCollation, const void *contex
|
|||
FmgrInfo *comparisonFunction = hashPartitionContext->comparisonFunction;
|
||||
Datum hashDatum = FunctionCall1Coll(hashFunction, DEFAULT_COLLATION_OID,
|
||||
partitionValue);
|
||||
int32 hashResult = 0;
|
||||
uint32 hashPartitionId = 0;
|
||||
|
||||
if (hashDatum == 0)
|
||||
|
@ -1250,10 +1259,8 @@ HashPartitionId(Datum partitionValue, Oid partitionCollation, const void *contex
|
|||
|
||||
if (hashPartitionContext->hasUniformHashDistribution)
|
||||
{
|
||||
uint64 hashTokenIncrement = HASH_TOKEN_COUNT / partitionCount;
|
||||
|
||||
hashResult = DatumGetInt32(hashDatum);
|
||||
hashPartitionId = (uint32) (hashResult - INT32_MIN) / hashTokenIncrement;
|
||||
int hashValue = DatumGetInt32(hashDatum);
|
||||
hashPartitionId = CalculateUniformHashRangeIndex(hashValue, partitionCount);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -17,10 +17,10 @@
|
|||
|
||||
|
||||
/*
|
||||
* MasterEvaluationMode is used to signal what expressions in the query
|
||||
* CoordinatorEvaluationMode is used to signal what expressions in the query
|
||||
* should be evaluated on the coordinator.
|
||||
*/
|
||||
typedef enum MasterEvaluationMode
|
||||
typedef enum CoordinatorEvaluationMode
|
||||
{
|
||||
/* evaluate nothing */
|
||||
EVALUATE_NONE = 0,
|
||||
|
@ -30,23 +30,24 @@ typedef enum MasterEvaluationMode
|
|||
|
||||
/* evaluate both the functions/expressions and the external paramaters */
|
||||
EVALUATE_FUNCTIONS_PARAMS
|
||||
} MasterEvaluationMode;
|
||||
} CoordinatorEvaluationMode;
|
||||
|
||||
/*
|
||||
* This struct is used to pass information to master
|
||||
* evaluation logic.
|
||||
*/
|
||||
typedef struct MasterEvaluationContext
|
||||
typedef struct CoordinatorEvaluationContext
|
||||
{
|
||||
PlanState *planState;
|
||||
MasterEvaluationMode evaluationMode;
|
||||
} MasterEvaluationContext;
|
||||
CoordinatorEvaluationMode evaluationMode;
|
||||
} CoordinatorEvaluationContext;
|
||||
|
||||
|
||||
extern bool RequiresMasterEvaluation(Query *query);
|
||||
extern void ExecuteMasterEvaluableExpressions(Query *query, PlanState *planState);
|
||||
extern bool RequiresCoordinatorEvaluation(Query *query);
|
||||
extern void ExecuteCoordinatorEvaluableExpressions(Query *query, PlanState *planState);
|
||||
extern Node * PartiallyEvaluateExpression(Node *expression,
|
||||
MasterEvaluationContext *masterEvaluationContext);
|
||||
CoordinatorEvaluationContext *
|
||||
coordinatorEvaluationContext);
|
||||
extern bool CitusIsVolatileFunction(Node *node);
|
||||
extern bool CitusIsMutableFunction(Node *node);
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ typedef struct CitusScanState
|
|||
CustomScanState customScanState; /* underlying custom scan node */
|
||||
|
||||
/* function that gets called before postgres starts its execution */
|
||||
bool finishedPreScan; /* flag to check if the pre scan is finished */
|
||||
void (*PreExecScan)(struct CitusScanState *scanState);
|
||||
|
||||
DistributedPlan *distributedPlan; /* distributed execution plan */
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* merge_planner.h
|
||||
* combine_query_planner.h
|
||||
* Function declarations for building planned statements; these statements
|
||||
* are then executed on the coordinator node.
|
||||
*
|
||||
|
@ -9,8 +9,8 @@
|
|||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef MERGE_PLANNER_H
|
||||
#define MERGE_PLANNER_H
|
||||
#ifndef COMBINE_QUERY_PLANNER_H
|
||||
#define COMBINE_QUERY_PLANNER_H
|
||||
|
||||
#include "lib/stringinfo.h"
|
||||
#include "nodes/parsenodes.h"
|
||||
|
@ -29,10 +29,10 @@ struct CustomScan;
|
|||
extern Path * CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInfo,
|
||||
Index restrictionIndex, RangeTblEntry *rte,
|
||||
CustomScan *remoteScan);
|
||||
extern PlannedStmt * MasterNodeSelectPlan(struct DistributedPlan *distributedPlan,
|
||||
struct CustomScan *dataScan);
|
||||
extern PlannedStmt * PlanCombineQuery(struct DistributedPlan *distributedPlan,
|
||||
struct CustomScan *dataScan);
|
||||
extern Unique * make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
|
||||
extern bool ReplaceCitusExtraDataContainer;
|
||||
extern CustomScan *ReplaceCitusExtraDataContainerWithCustomScan;
|
||||
|
||||
#endif /* MERGE_PLANNER_H */
|
||||
#endif /* COMBINE_QUERY_PLANNER_H */
|
||||
|
|
|
@ -96,7 +96,7 @@ typedef enum MultiConnectionStructInitializationState
|
|||
} MultiConnectionStructInitializationState;
|
||||
|
||||
|
||||
/* declaring this directly above makes uncrustify go crazy */
|
||||
/* declaring this directly above causes uncrustify to format it badly */
|
||||
typedef enum MultiConnectionMode MultiConnectionMode;
|
||||
|
||||
typedef struct MultiConnection
|
||||
|
@ -173,6 +173,9 @@ typedef struct ConnectionHashEntry
|
|||
{
|
||||
ConnectionHashKey key;
|
||||
dlist_head *connections;
|
||||
|
||||
/* connections list is valid or not */
|
||||
bool isValid;
|
||||
} ConnectionHashEntry;
|
||||
|
||||
/* hash entry for cached connection parameters */
|
||||
|
|
|
@ -99,7 +99,8 @@ extern void QualifyAlterFunctionDependsStmt(Node *stmt);
|
|||
extern char * DeparseAlterRoleStmt(Node *stmt);
|
||||
extern char * DeparseAlterRoleSetStmt(Node *stmt);
|
||||
|
||||
extern Node * MakeSetStatementArgument(char *configurationName, char *configurationValue);
|
||||
extern List * MakeSetStatementArguments(char *configurationName,
|
||||
char *configurationValue);
|
||||
extern void QualifyAlterRoleSetStmt(Node *stmt);
|
||||
|
||||
/* forward declarations for deparse_extension_stmts.c */
|
||||
|
|
|
@ -26,5 +26,6 @@ extern List * ExplainAnalyzeTaskList(List *originalTaskList,
|
|||
TupleDestination *defaultTupleDest, TupleDesc
|
||||
tupleDesc, ParamListInfo params);
|
||||
extern bool RequestedForExplainAnalyze(CitusScanState *node);
|
||||
extern void ResetExplainAnalyzeData(List *taskList);
|
||||
|
||||
#endif /* MULTI_EXPLAIN_H */
|
||||
|
|
|
@ -151,7 +151,7 @@ typedef struct Job
|
|||
List *taskList;
|
||||
List *dependentJobList;
|
||||
bool subqueryPushdown;
|
||||
bool requiresMasterEvaluation; /* only applies to modify jobs */
|
||||
bool requiresCoordinatorEvaluation; /* only applies to modify jobs */
|
||||
bool deferredPruning;
|
||||
Const *partitionKeyValue;
|
||||
|
||||
|
@ -599,7 +599,7 @@ extern List * QueryPushdownSqlTaskList(Query *query, uint64 jobId,
|
|||
RelationRestrictionContext *
|
||||
relationRestrictionContext,
|
||||
List *prunedRelationShardList, TaskType taskType,
|
||||
bool modifyRequiresMasterEvaluation);
|
||||
bool modifyRequiresCoordinatorEvaluation);
|
||||
|
||||
/* function declarations for managing jobs */
|
||||
extern uint64 UniqueJobId(void);
|
||||
|
|
|
@ -71,6 +71,7 @@ extern Oid ExtractFirstCitusTableId(Query *query);
|
|||
extern RangeTblEntry * ExtractSelectRangeTableEntry(Query *query);
|
||||
extern Oid ModifyQueryResultRelationId(Query *query);
|
||||
extern RangeTblEntry * ExtractResultRelationRTE(Query *query);
|
||||
extern RangeTblEntry * ExtractResultRelationRTEOrError(Query *query);
|
||||
extern RangeTblEntry * ExtractDistributedInsertValuesRTE(Query *query);
|
||||
extern bool IsMultiRowInsert(Query *query);
|
||||
extern void AddShardIntervalRestrictionToSelect(Query *subqery,
|
||||
|
|
|
@ -33,5 +33,6 @@ extern Query * BuildReadIntermediateResultsArrayQuery(List *targetEntryList,
|
|||
List *resultIdList,
|
||||
bool useBinaryCopyFormat);
|
||||
extern bool GeneratingSubplans(void);
|
||||
extern bool IsLocalTableRTE(Node *node);
|
||||
|
||||
#endif /* RECURSIVE_PLANNING_H */
|
||||
|
|
|
@ -38,8 +38,14 @@ typedef enum AdvisoryLocktagClass
|
|||
ADV_LOCKTAG_CLASS_CITUS_JOB = 6,
|
||||
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION = 7,
|
||||
ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8,
|
||||
ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9
|
||||
} AdvisoryLocktagClass;
|
||||
|
||||
/* CitusOperations has constants for citus operations */
|
||||
typedef enum CitusOperations
|
||||
{
|
||||
CITUS_TRANSACTION_RECOVERY = 0
|
||||
} CitusOperations;
|
||||
|
||||
/* reuse advisory lock, but with different, unused field 4 (4)*/
|
||||
#define SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, db, shardid) \
|
||||
|
@ -83,6 +89,14 @@ typedef enum AdvisoryLocktagClass
|
|||
(uint32) (colocationOrTableId), \
|
||||
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)
|
||||
|
||||
/* advisory lock for citus operations, also it has the database hardcoded to MyDatabaseId,
|
||||
* to ensure the locks are local to each database */
|
||||
#define SET_LOCKTAG_CITUS_OPERATION(tag, operationId) \
|
||||
SET_LOCKTAG_ADVISORY(tag, \
|
||||
MyDatabaseId, \
|
||||
(uint32) 0, \
|
||||
(uint32) operationId, \
|
||||
ADV_LOCKTAG_CLASS_CITUS_OPERATIONS)
|
||||
|
||||
/* Lock shard/relation metadata for safe modifications */
|
||||
extern void LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode);
|
||||
|
@ -110,6 +124,9 @@ extern void UnlockColocationId(int colocationId, LOCKMODE lockMode);
|
|||
extern void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode);
|
||||
extern void LockShardsInPlacementListMetadata(List *shardPlacementList,
|
||||
LOCKMODE lockMode);
|
||||
|
||||
extern void LockTransactionRecovery(LOCKMODE lockMode);
|
||||
|
||||
extern void SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode);
|
||||
extern void LockRelationShardResources(List *relationShardList, LOCKMODE lockMode);
|
||||
extern List * GetSortedReferenceShardIntervals(List *relationList);
|
||||
|
|
|
@ -47,6 +47,7 @@ extern int CompareShardPlacementsByShardId(const void *leftElement,
|
|||
extern int CompareRelationShards(const void *leftElement,
|
||||
const void *rightElement);
|
||||
extern int ShardIndex(ShardInterval *shardInterval);
|
||||
extern int CalculateUniformHashRangeIndex(int hashedValue, int shardCount);
|
||||
extern ShardInterval * FindShardInterval(Datum partitionColumnValue,
|
||||
CitusTableCacheEntry *cacheEntry);
|
||||
extern int FindShardIntervalIndex(Datum searchedValue, CitusTableCacheEntry *cacheEntry);
|
||||
|
|
|
@ -70,14 +70,14 @@ extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
|
|||
uint64 shardId,
|
||||
uint32 placementIndex);
|
||||
extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList);
|
||||
extern uint32 ActivePrimaryWorkerNodeCount(void);
|
||||
extern List * ActivePrimaryWorkerNodeList(LOCKMODE lockMode);
|
||||
extern uint32 ActivePrimaryNonCoordinatorNodeCount(void);
|
||||
extern List * ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode);
|
||||
extern List * ActivePrimaryNodeList(LOCKMODE lockMode);
|
||||
extern List * ReferenceTablePlacementNodeList(LOCKMODE lockMode);
|
||||
extern List * DistributedTablePlacementNodeList(LOCKMODE lockMode);
|
||||
extern bool NodeCanHaveDistTablePlacements(WorkerNode *node);
|
||||
extern uint32 ActiveReadableWorkerNodeCount(void);
|
||||
extern List * ActiveReadableWorkerNodeList(void);
|
||||
extern uint32 ActiveReadableNonCoordinatorNodeCount(void);
|
||||
extern List * ActiveReadableNonCoordinatorNodeList(void);
|
||||
extern List * ActiveReadableNodeList(void);
|
||||
extern WorkerNode * FindWorkerNode(const char *nodeName, int32 nodePort);
|
||||
extern WorkerNode * ForceFindWorkerNode(const char *nodeName, int32 nodePort);
|
||||
|
|
|
@ -22,9 +22,9 @@
|
|||
*/
|
||||
typedef enum TargetWorkerSet
|
||||
{
|
||||
WORKERS_WITH_METADATA,
|
||||
OTHER_WORKERS,
|
||||
ALL_WORKERS
|
||||
NON_COORDINATOR_METADATA_NODES,
|
||||
NON_COORDINATOR_NODES,
|
||||
ALL_SHARD_NODES
|
||||
} TargetWorkerSet;
|
||||
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
CREATE SCHEMA alter_role;
|
||||
CREATE SCHEMA ",CitUs,.TeeN!?";
|
||||
-- test if the passowrd of the extension owner can be upgraded
|
||||
ALTER ROLE CURRENT_USER PASSWORD 'password123' VALID UNTIL 'infinity';
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = current_user$$);
|
||||
|
@ -111,6 +112,12 @@ SELECT run_command_on_workers('SHOW enable_hashagg');
|
|||
(localhost,57638,t,off)
|
||||
(1 row)
|
||||
|
||||
-- provide a list of values in a supported configuration
|
||||
ALTER ROLE CURRENT_USER SET search_path TO ",CitUs,.TeeN!?", alter_role, public;
|
||||
-- test user defined GUCs that appear to be a list, but instead a single string
|
||||
ALTER ROLE ALL SET public.myguc TO "Hello, World";
|
||||
-- test for configuration values that should not be downcased even when unquoted
|
||||
ALTER ROLE CURRENT_USER SET lc_messages TO 'C';
|
||||
-- add worker and check all settings are copied
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
?column?
|
||||
|
@ -139,6 +146,27 @@ SELECT run_command_on_workers('SHOW enable_hashagg');
|
|||
(localhost,57638,t,off)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('SHOW search_path');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,""",CitUs,.TeeN!?"", alter_role, public")
|
||||
(localhost,57638,t,""",CitUs,.TeeN!?"", alter_role, public")
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('SHOW lc_messages');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,C)
|
||||
(localhost,57638,t,C)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('SHOW public.myguc');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"Hello, World")
|
||||
(localhost,57638,t,"Hello, World")
|
||||
(2 rows)
|
||||
|
||||
-- reset to default values
|
||||
ALTER ROLE CURRENT_USER RESET enable_hashagg;
|
||||
SELECT run_command_on_workers('SHOW enable_hashagg');
|
||||
|
@ -226,4 +254,4 @@ SELECT run_command_on_workers('SHOW enable_hashjoin');
|
|||
(localhost,57638,t,on)
|
||||
(2 rows)
|
||||
|
||||
DROP SCHEMA alter_role CASCADE;
|
||||
DROP SCHEMA alter_role, ",CitUs,.TeeN!?" CASCADE;
|
||||
|
|
|
@ -0,0 +1,133 @@
|
|||
SET citus.next_shard_id TO 20080000;
|
||||
CREATE SCHEMA anonymous_columns;
|
||||
SET search_path TO anonymous_columns;
|
||||
CREATE TABLE t0 (a int PRIMARY KEY, b int, "?column?" text);
|
||||
SELECT create_distributed_table('t0', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t0 VALUES (1, 2, 'hello'), (2, 4, 'world');
|
||||
SELECT "?column?" FROM t0 ORDER BY 1;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
hello
|
||||
world
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT * FROM t0) SELECT "?column?" FROM a ORDER BY 1;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
hello
|
||||
world
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT '' FROM t0) SELECT * FROM a;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(2 rows)
|
||||
|
||||
-- test CTE's that could be rewritten as subquery
|
||||
WITH a AS (SELECT '' FROM t0 GROUP BY a) SELECT * FROM a;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT '' FROM t0 GROUP BY b) SELECT * FROM a;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT '','' FROM t0 GROUP BY a) SELECT * FROM a;
|
||||
?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
|
|
||||
|
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT '','' FROM t0 GROUP BY b) SELECT * FROM a;
|
||||
?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
|
|
||||
|
|
||||
(2 rows)
|
||||
|
||||
WITH a AS (SELECT 1, * FROM t0 WHERE a = 1) SELECT * FROM a;
|
||||
?column? | a | b | ?column?
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 | 2 | hello
|
||||
(1 row)
|
||||
|
||||
-- test CTE's that are referenced multiple times and hence need to stay CTE's
|
||||
WITH a AS (SELECT '' FROM t0 WHERE a = 1) SELECT * FROM a, a b;
|
||||
?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
|
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT '','' FROM t0 WHERE a = 42) SELECT * FROM a, a b;
|
||||
?column? | ?column? | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- test with explicit subqueries
|
||||
SELECT * FROM (SELECT a, '' FROM t0 GROUP BY a) as foo ORDER BY 1;
|
||||
a | ?column?
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
2 |
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM (SELECT a, '', '' FROM t0 GROUP BY a ) as foo ORDER BY 1;
|
||||
a | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
1 | |
|
||||
2 | |
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM (SELECT b, '' FROM t0 GROUP BY b ) as foo ORDER BY 1;
|
||||
b | ?column?
|
||||
---------------------------------------------------------------------
|
||||
2 |
|
||||
4 |
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM (SELECT b, '', '' FROM t0 GROUP BY b ) as foo ORDER BY 1;
|
||||
b | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
2 | |
|
||||
4 | |
|
||||
(2 rows)
|
||||
|
||||
-- some tests that follow very similar codeoaths
|
||||
SELECT a + 1 FROM t0 ORDER BY 1;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
3
|
||||
(2 rows)
|
||||
|
||||
SELECT a + 1, a - 1 FROM t0 ORDER BY 1;
|
||||
?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
2 | 0
|
||||
3 | 1
|
||||
(2 rows)
|
||||
|
||||
WITH cte1 AS (SELECT row_to_json(row(a))->'f1' FROM t0) SELECT * FROM cte1 ORDER BY 1::text;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
2
|
||||
(2 rows)
|
||||
|
||||
-- clean up after test
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA anonymous_columns CASCADE;
|
|
@ -1,7 +1,7 @@
|
|||
-- This test relies on metadata being synced
|
||||
-- that's why is should be executed on MX schedule
|
||||
CREATE SCHEMA master_evaluation;
|
||||
SET search_path TO master_evaluation;
|
||||
CREATE SCHEMA coordinator_evaluation;
|
||||
SET search_path TO coordinator_evaluation;
|
||||
-- create a volatile function that returns the local node id
|
||||
CREATE OR REPLACE FUNCTION get_local_node_id_volatile()
|
||||
RETURNS INT AS $$
|
||||
|
@ -29,8 +29,8 @@ SELECT create_distributed_function('get_local_node_id_volatile_sum_with_param(in
|
|||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE master_evaluation_table (key int, value int);
|
||||
SELECT create_distributed_table('master_evaluation_table', 'key');
|
||||
CREATE TABLE coordinator_evaluation_table (key int, value int);
|
||||
SELECT create_distributed_table('coordinator_evaluation_table', 'key');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -44,16 +44,16 @@ SELECT get_local_node_id_volatile();
|
|||
(1 row)
|
||||
|
||||
-- load data
|
||||
INSERT INTO master_evaluation_table SELECT i, i FROM generate_series(0,100)i;
|
||||
INSERT INTO coordinator_evaluation_table SELECT i, i FROM generate_series(0,100)i;
|
||||
-- we expect that the function is evaluated on the worker node, so we should get a row
|
||||
SELECT get_local_node_id_volatile() > 0 FROM master_evaluation_table WHERE key = 1;
|
||||
SELECT get_local_node_id_volatile() > 0 FROM coordinator_evaluation_table WHERE key = 1;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- make sure that it is also true for fast-path router queries with paramaters
|
||||
PREPARE fast_path_router_with_param(int) AS SELECT get_local_node_id_volatile() > 0 FROM master_evaluation_table WHERE key = $1;
|
||||
PREPARE fast_path_router_with_param(int) AS SELECT get_local_node_id_volatile() > 0 FROM coordinator_evaluation_table WHERE key = $1;
|
||||
execute fast_path_router_with_param(1);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -103,13 +103,13 @@ execute fast_path_router_with_param(8);
|
|||
(1 row)
|
||||
|
||||
-- same query as fast_path_router_with_param, but with consts
|
||||
SELECT get_local_node_id_volatile() > 0 FROM master_evaluation_table WHERE key = 1;
|
||||
SELECT get_local_node_id_volatile() > 0 FROM coordinator_evaluation_table WHERE key = 1;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
PREPARE router_with_param(int) AS SELECT get_local_node_id_volatile() > 0 FROM master_evaluation_table m1 JOIN master_evaluation_table m2 USING(key) WHERE key = $1;
|
||||
PREPARE router_with_param(int) AS SELECT get_local_node_id_volatile() > 0 FROM coordinator_evaluation_table m1 JOIN coordinator_evaluation_table m2 USING(key) WHERE key = $1;
|
||||
execute router_with_param(1);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -159,21 +159,21 @@ execute router_with_param(8);
 (1 row)

 -- same query as router_with_param, but with consts
-SELECT get_local_node_id_volatile() > 0 FROM master_evaluation_table m1 JOIN master_evaluation_table m2 USING(key) WHERE key = 1;
+SELECT get_local_node_id_volatile() > 0 FROM coordinator_evaluation_table m1 JOIN coordinator_evaluation_table m2 USING(key) WHERE key = 1;
 ?column?
 ---------------------------------------------------------------------
 t
 (1 row)

 -- for multi-shard queries, we still expect the evaluation to happen on the workers
-SELECT count(*), max(get_local_node_id_volatile()) != 0, min(get_local_node_id_volatile()) != 0 FROM master_evaluation_table;
+SELECT count(*), max(get_local_node_id_volatile()) != 0, min(get_local_node_id_volatile()) != 0 FROM coordinator_evaluation_table;
 count | ?column? | ?column?
 ---------------------------------------------------------------------
 101 | t | t
 (1 row)

 -- when executed locally, we expect to get the result from the coordinator
-SELECT (SELECT count(*) FROM master_evaluation_table), get_local_node_id_volatile() = 0;
+SELECT (SELECT count(*) FROM coordinator_evaluation_table), get_local_node_id_volatile() = 0;
 count | ?column?
 ---------------------------------------------------------------------
 101 | t
@@ -181,7 +181,7 @@ SELECT (SELECT count(*) FROM master_evaluation_table), get_local_node_id_volatil

 -- make sure that we get the results from the workers when the query is sent to workers
 SET citus.task_assignment_policy TO "round-robin";
-SELECT (SELECT count(*) FROM master_evaluation_table), get_local_node_id_volatile() = 0;
+SELECT (SELECT count(*) FROM coordinator_evaluation_table), get_local_node_id_volatile() = 0;
 count | ?column?
 ---------------------------------------------------------------------
 101 | f
@@ -189,13 +189,13 @@ SELECT (SELECT count(*) FROM master_evaluation_table), get_local_node_id_volatil

 RESET citus.task_assignment_policy;
 -- for multi-shard SELECTs, we don't try to evaluate on the coordinator
-SELECT min(get_local_node_id_volatile()) > 0 FROM master_evaluation_table;
+SELECT min(get_local_node_id_volatile()) > 0 FROM coordinator_evaluation_table;
 ?column?
 ---------------------------------------------------------------------
 t
 (1 row)

-SELECT count(*) > 0 FROM master_evaluation_table WHERE value >= get_local_node_id_volatile();
+SELECT count(*) > 0 FROM coordinator_evaluation_table WHERE value >= get_local_node_id_volatile();
 ?column?
 ---------------------------------------------------------------------
 t
@@ -204,7 +204,7 @@ SELECT count(*) > 0 FROM master_evaluation_table WHERE value >= get_local_node_i
 -- let's have some tests around expressions
 -- for modifications, we expect the evaluation to happen on the coordinator
 -- thus the results should be 0
-PREPARE insert_with_param_expression(int) AS INSERT INTO master_evaluation_table (key, value) VALUES ($1 + get_local_node_id_volatile(), $1 + get_local_node_id_volatile()) RETURNING key, value;
+PREPARE insert_with_param_expression(int) AS INSERT INTO coordinator_evaluation_table (key, value) VALUES ($1 + get_local_node_id_volatile(), $1 + get_local_node_id_volatile()) RETURNING key, value;
 EXECUTE insert_with_param_expression(0);
 key | value
 ---------------------------------------------------------------------
@@ -249,7 +249,7 @@ EXECUTE insert_with_param_expression(0);

 -- for modifications, we expect the evaluation to happen on the coordinator
 -- thus the results should be 0
-PREPARE insert_with_param(int) AS INSERT INTO master_evaluation_table (key, value) VALUES ($1, $1) RETURNING key, value;
+PREPARE insert_with_param(int) AS INSERT INTO coordinator_evaluation_table (key, value) VALUES ($1, $1) RETURNING key, value;
 EXECUTE insert_with_param(0 + get_local_node_id_volatile());
 key | value
 ---------------------------------------------------------------------
@@ -292,7 +292,7 @@ EXECUTE insert_with_param(0 + get_local_node_id_volatile());
 0 | 0
 (1 row)

-PREPARE router_select_with_param_expression(int) AS SELECT value > 0 FROM master_evaluation_table WHERE key = $1 + get_local_node_id_volatile();
+PREPARE router_select_with_param_expression(int) AS SELECT value > 0 FROM coordinator_evaluation_table WHERE key = $1 + get_local_node_id_volatile();
 -- for selects, we expect the evaluation to happen on the workers
 -- this means that the query should be hitting multiple workers
 SET client_min_messages TO DEBUG2;
@@ -353,7 +353,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 t
 (1 row)

-PREPARE router_select_with_param(int) AS SELECT DISTINCT value FROM master_evaluation_table WHERE key = $1;
+PREPARE router_select_with_param(int) AS SELECT DISTINCT value FROM coordinator_evaluation_table WHERE key = $1;
 -- this time the parameter itself is a function, so should be evaluated
 -- on the coordinator
 EXECUTE router_select_with_param(0 + get_local_node_id_volatile());
@@ -460,7 +460,7 @@ EXECUTE router_select_with_param(get_local_node_id_volatile());
 (1 row)

 -- this time use the parameter inside the function
-PREPARE router_select_with_parameter_in_function(int) AS SELECT bool_and(get_local_node_id_volatile_sum_with_param($1) > 1) FROM master_evaluation_table WHERE key = get_local_node_id_volatile_sum_with_param($1);
+PREPARE router_select_with_parameter_in_function(int) AS SELECT bool_and(get_local_node_id_volatile_sum_with_param($1) > 1) FROM coordinator_evaluation_table WHERE key = get_local_node_id_volatile_sum_with_param($1);
 EXECUTE router_select_with_parameter_in_function(0);
 DEBUG: Router planner cannot handle multi-shard select queries
 bool_and
@@ -514,8 +514,8 @@ DEBUG: Router planner cannot handle multi-shard select queries
 RESET client_min_messages;
 RESET citus.log_remote_commands;
 -- numeric has different casting affects, so some tests on that
-CREATE TABLE master_evaluation_table_2 (key numeric, value numeric);
-SELECT create_distributed_table('master_evaluation_table_2', 'key');
+CREATE TABLE coordinator_evaluation_table_2 (key numeric, value numeric);
+SELECT create_distributed_table('coordinator_evaluation_table_2', 'key');
 create_distributed_table
 ---------------------------------------------------------------------

@@ -529,13 +529,13 @@ BEGIN
 RETURN trunc(random() * (end_int-start_int) + start_int);
 END;
 $$ LANGUAGE 'plpgsql' STRICT;
-CREATE OR REPLACE PROCEDURE master_evaluation.test_procedure(int)
+CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int)
 LANGUAGE plpgsql
 AS $procedure$
 DECLARE filterKey INTEGER;
 BEGIN
-filterKey := round(master_evaluation.TEST_RANDOM(1,1)) + $1;
-PERFORM DISTINCT value FROM master_evaluation_table_2 WHERE key = filterKey;
+filterKey := round(coordinator_evaluation.TEST_RANDOM(1,1)) + $1;
+PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey;
 END;
 $procedure$;
 -- we couldn't find a meaningful query to write for this
@@ -567,13 +567,13 @@ DEBUG: Deferred pruning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
 CALL test_procedure(100);
-CREATE OR REPLACE PROCEDURE master_evaluation.test_procedure_2(int)
+CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure_2(int)
 LANGUAGE plpgsql
 AS $procedure$
 DECLARE filterKey INTEGER;
 BEGIN
-filterKey := round(master_evaluation.TEST_RANDOM(1,1)) + $1;
-INSERT INTO master_evaluation_table_2 VALUES (filterKey, filterKey);
+filterKey := round(coordinator_evaluation.TEST_RANDOM(1,1)) + $1;
+INSERT INTO coordinator_evaluation_table_2 VALUES (filterKey, filterKey);
 END;
 $procedure$;
 RESET citus.log_remote_commands ;
@@ -586,11 +586,11 @@ CALL test_procedure_2(100);
 CALL test_procedure_2(100);
 CALL test_procedure_2(100);
 CALL test_procedure_2(100);
-SELECT count(*) FROM master_evaluation_table_2 WHERE key = 101;
+SELECT count(*) FROM coordinator_evaluation_table_2 WHERE key = 101;
 count
 ---------------------------------------------------------------------
 7
 (1 row)

 SET client_min_messages TO ERROR;
-DROP SCHEMA master_evaluation CASCADE;
+DROP SCHEMA coordinator_evaluation CASCADE;
File diff suppressed because it is too large
@@ -7,10 +7,10 @@
 -- (b) Local Execution vs Remote Execution
 -- (c) Parameters on distribution key vs Parameters on non-dist key
 -- vs Non-parametrized queries
--- (d) Master Function Evaluation Required vs
--- Master Function Evaluation Not Required
-CREATE SCHEMA master_evaluation_combinations;
-SET search_path TO master_evaluation_combinations;
+-- (d) Coordinator Function Evaluation Required vs
+-- Coordinator Function Evaluation Not Required
+CREATE SCHEMA coordinator_evaluation_combinations;
+SET search_path TO coordinator_evaluation_combinations;
 SET citus.next_shard_id TO 1170000;
 -- create a volatile function that returns the local node id
 CREATE OR REPLACE FUNCTION get_local_node_id_volatile()
@@ -824,10 +824,10 @@ EXECUTE router_with_only_function;

 \c - - - :worker_2_port
 SET citus.log_local_commands TO ON;
-SET search_path TO master_evaluation_combinations;
+SET search_path TO coordinator_evaluation_combinations;
 -- show that the data with user_id = 3 is local
 SELECT count(*) FROM user_info_data WHERE user_id = 3;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
+NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
 count
 ---------------------------------------------------------------------
 1
@ -836,63 +836,63 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM master_eva
|
|||
-- make sure that it is also true for fast-path router queries with paramaters
|
||||
PREPARE fast_path_router_with_param(int) AS SELECT count(*) FROM user_info_data WHERE user_id = $1;
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
|
@ -901,49 +901,49 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
|
|||
-- make sure that it is also true for fast-path router queries with paramaters
|
||||
PREPARE fast_path_router_with_param_and_func(int) AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = $1;
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
execute fast_path_router_with_param_and_func(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
|
@ -958,56 +958,56 @@ execute fast_path_router_with_param_and_func(8);
|
|||
PREPARE fast_path_router_with_param_and_func_on_non_dist_key(int) AS
|
||||
SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3 AND user_index = $1;
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_and_func_on_non_dist_key(3);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3 AND u_data = ('name3', 23)::user_data;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
|
@ -1015,77 +1015,77 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
|
|||
|
||||
PREPARE fast_path_router_with_param_on_non_dist_key_and_func(user_data) AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3 AND u_data = $1;
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM user_info_data WHERE user_id = 3 AND u_data = ('name3', 23)::user_data;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
|
@ -1093,63 +1093,63 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM master_eva
|
|||
|
||||
PREPARE fast_path_router_with_param_on_non_dist_key(user_data) AS SELECT count(*) FROM user_info_data WHERE user_id = 3 AND u_data = $1;
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_param_on_non_dist_key(('name3', 23)::user_data);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
|
@ -1157,63 +1157,63 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM master_eva
|
|||
|
||||
PREPARE fast_path_router_with_only_function AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3;
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
EXECUTE fast_path_router_with_only_function;
|
||||
NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE (user_id OPERATOR(pg_catalog.=) 3)
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE user_id = 3;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
|
@ -1222,63 +1222,63 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_ev
|
|||
-- make sure that it is also true for fast-path router queries with paramaters
|
||||
PREPARE router_with_param(int) AS SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE user_id = $1;
|
||||
execute router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
execute router_with_param(3);
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) $1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
(the remaining executions of router_with_param(3) return the same locally-executed notices and result)
SELECT get_local_node_id_volatile() > 0 FROM user_info_data m1 JOIN user_info_data m2 USING(user_id) WHERE m1.user_id = 3;
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) 3)
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) 3)
 ?column?
---------------------------------------------------------------------
 t
@@ -1286,56 +1286,56 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
PREPARE router_with_param_and_func(int) AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data m1 JOIN user_info_data m2 USING(user_id) WHERE m1.user_id = $1;
execute router_with_param_and_func(3);
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) $1)
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) $1)
 ?column?
---------------------------------------------------------------------
 t
(1 row)

(the remaining executions of router_with_param_and_func(3) return the same locally-executed notices and result)
@@ -1344,49 +1344,49 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
PREPARE router_with_param_and_func_on_non_dist_key(int) AS
SELECT get_local_node_id_volatile() > 0 FROM user_info_data WHERE user_id = 3 AND user_id = 3 AND user_index = $1;
EXECUTE router_with_param_and_func_on_non_dist_key(3);
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM master_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM coordinator_evaluation_combinations.user_info_data_1170001 user_info_data WHERE ((user_id OPERATOR(pg_catalog.=) 3) AND (user_id OPERATOR(pg_catalog.=) 3) AND (user_index OPERATOR(pg_catalog.=) $1))
 ?column?
---------------------------------------------------------------------
 t
(1 row)

(the remaining executions of router_with_param_and_func_on_non_dist_key(3) return the same locally-executed notices and result)
@@ -1394,21 +1394,21 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
-- same query as router_with_param, but with consts
SELECT get_local_node_id_volatile() > 0 FROM user_info_data m1 JOIN user_info_data m2 USING(user_id) WHERE m1.user_id = 3;
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) 3)
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 m1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 m2(user_id, u_data, user_index) USING (user_id)) WHERE (m1.user_id OPERATOR(pg_catalog.=) 3)
 ?column?
---------------------------------------------------------------------
 t
(1 row)

SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE u1.user_id = 3 AND u1.u_data = ('name3', 23)::user_data;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::coordinator_evaluation_combinations.user_data))
 count
---------------------------------------------------------------------
 1
(1 row)

(the repeated SELECT count(*) query returns the same locally-executed notices and result)
@@ -1416,70 +1416,70 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_ev
PREPARE router_with_param_on_non_dist_key(user_data) AS SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE u1.user_id = 3 AND u1.u_data = $1;
EXECUTE router_with_param_on_non_dist_key(('name3', 23)::user_data);
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
 count
---------------------------------------------------------------------
 1
(1 row)

(the remaining executions of router_with_param_on_non_dist_key return the same locally-executed notices and result)
SELECT get_local_node_id_volatile() > 0 FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE u1.user_id = 3 AND u1.u_data = ('name3', 23)::user_data;
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::coordinator_evaluation_combinations.user_data))
 ?column?
---------------------------------------------------------------------
 t
@@ -1487,70 +1487,70 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
PREPARE router_with_param_on_non_dist_key_and_func(user_data) AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE u1.user_id = 3 AND u1.u_data = $1;
EXECUTE router_with_param_on_non_dist_key_and_func(('name3', 23)::user_data);
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
 ?column?
---------------------------------------------------------------------
 t
(1 row)

(the remaining executions of router_with_param_on_non_dist_key_and_func return the same locally-executed notices and result)
SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE user_id = 3 AND u1.u_data = ('name3', 23)::user_data;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) 3) AND (u1.u_data OPERATOR(pg_catalog.=) ROW('name3'::text, 23)::coordinator_evaluation_combinations.user_data))
 count
---------------------------------------------------------------------
 1
@@ -1558,70 +1558,70 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_ev
PREPARE router_with_two_params(user_data, int) AS SELECT count(*) FROM user_info_data u1 JOIN user_info_data u2 USING (user_id) WHERE user_id = $2 AND u1.u_data = $1;
EXECUTE router_with_two_params(('name3', 23)::user_data, 3);
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) $2) AND (u1.u_data OPERATOR(pg_catalog.=) $1::master_evaluation_combinations.user_data))
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE ((u1.user_id OPERATOR(pg_catalog.=) $2) AND (u1.u_data OPERATOR(pg_catalog.=) $1::coordinator_evaluation_combinations.user_data))
 count
---------------------------------------------------------------------
 1
(1 row)

(the remaining executions of router_with_two_params return the same locally-executed notices and result)
SELECT get_local_node_id_volatile() > 0 FROM user_info_data u1 JOIN user_info_data u2 USING(user_id) WHERE user_id = 3;
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
 ?column?
---------------------------------------------------------------------
 t
@@ -1629,56 +1629,56 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
PREPARE router_with_only_function AS SELECT get_local_node_id_volatile() > 0 FROM user_info_data u1 JOIN user_info_data u2 USING(user_id) WHERE user_id = 3;
EXECUTE router_with_only_function;
-NOTICE: executing the command locally: SELECT (master_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (master_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN master_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
+NOTICE: executing the command locally: SELECT (coordinator_evaluation_combinations.get_local_node_id_volatile() OPERATOR(pg_catalog.>) 0) FROM (coordinator_evaluation_combinations.user_info_data_1170001 u1(user_id, u_data, user_index) JOIN coordinator_evaluation_combinations.user_info_data_1170001 u2(user_id, u_data, user_index) USING (user_id)) WHERE (u1.user_id OPERATOR(pg_catalog.=) 3)
 ?column?
---------------------------------------------------------------------
 t
(1 row)

(the remaining executions of router_with_only_function return the same locally-executed notices and result)
@@ -1687,4 +1687,4 @@ NOTICE: executing the command locally: SELECT (master_evaluation_combinations.g
-- suppress notices
\c - - - :master_port
SET client_min_messages TO ERROR;
DROP SCHEMA master_evaluation_combinations CASCADE;
DROP SCHEMA coordinator_evaluation_combinations CASCADE;
@@ -198,6 +198,23 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinato
(1 row)

ROLLBACK;
-- repartition queries should work fine
SET citus.enable_repartition_joins TO ON;
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
count
---------------------------------------------------------------------
100
(1 row)

BEGIN;
SET citus.enable_repartition_joins TO ON;
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
count
---------------------------------------------------------------------
100
(1 row)

END;
BEGIN;
SET citus.enable_repartition_joins TO ON;
-- trigger local execution
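The hunk above enables repartition joins with a per-session (or per-transaction) SET. As a purely illustrative sketch that is not part of the test file, the same GUC could also be turned on more persistently; the database name regression is taken from the connection strings shown elsewhere in this diff, and superuser privileges are assumed:

-- Illustrative only: make repartition joins the default beyond a single session.
ALTER SYSTEM SET citus.enable_repartition_joins = 'on';   -- server-wide default (postgresql.auto.conf)
SELECT pg_reload_conf();                                   -- pick up the change without a restart
ALTER DATABASE regression SET citus.enable_repartition_joins = 'on';  -- or scope it to one database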
@@ -439,6 +456,7 @@ BEGIN;
-- copying task
INSERT INTO dist_table SELECT a + 1 FROM dist_table;
ROLLBACK;
SET citus.shard_replication_factor TO 1;
BEGIN;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dist_table1(a int);
@@ -459,11 +477,12 @@ RESET citus.enable_cte_inlining;
DELETE FROM test;
DROP TABLE test;
DROP TABLE dist_table;
DROP TABLE ref;
NOTICE: executing the command locally: DROP TABLE IF EXISTS coordinator_shouldhaveshards.ref_xxxxx CASCADE
CONTEXT: SQL statement "SELECT master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
PL/pgSQL function citus_drop_trigger() line 19 at PERFORM
DROP SCHEMA coordinator_shouldhaveshards CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table ref
drop cascades to table ref_1503016
drop cascades to table local
NOTICE: drop cascades to table local
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
?column?
---------------------------------------------------------------------
@@ -165,7 +165,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT key, value, other_value, (SELECT 1) FROM (SELECT cte_1.key, cte_1.value, cte_1.other_value FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_1) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) top_cte, (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) bar
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) top_cte, (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) bar(key, value, other_value, "?column?")
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
count
|
||||
|
@@ -249,7 +249,7 @@ DEBUG: CTE cte_2 is going to be inlined via distributed planning
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT key, value, other_value FROM cte_inline.test_table
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT cte_1.key, cte_1.value, cte_1.other_value, (SELECT 1) FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table) cte_1) foo JOIN (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ON (true))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT cte_1.key, cte_1.value, cte_1.other_value, (SELECT 1) FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table) cte_1) foo(key, value, other_value, "?column?") JOIN (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ON (true))
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@@ -153,7 +153,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT key, value, other_value, (SELECT 1) FROM (SELECT cte_1.key, cte_1.value, cte_1.other_value FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_1) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) top_cte, (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) bar
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) top_cte, (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) bar(key, value, other_value, "?column?")
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
count
|
||||
|
@@ -237,7 +237,7 @@ DEBUG: CTE cte_2 is going to be inlined via distributed planning
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT key, value, other_value FROM cte_inline.test_table
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT cte_1.key, cte_1.value, cte_1.other_value, (SELECT 1) FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table) cte_1) foo JOIN (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ON (true))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT cte_1.key, cte_1.value, cte_1.other_value, (SELECT 1) FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table) cte_1) foo(key, value, other_value, "?column?") JOIN (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ON (true))
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@@ -0,0 +1,218 @@
|
|||
CREATE SCHEMA cursors;
|
||||
SET search_path TO cursors;
|
||||
CREATE TABLE distributed_table (key int, value text);
|
||||
SELECT create_distributed_table('distributed_table', 'key');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- load some data, but not a very small amount, because RETURN QUERY in plpgsql
-- hard-codes the cursor fetch to 50 rows on PG 12; that limit might be raised
-- in a future release, so be mindful
INSERT INTO distributed_table SELECT i % 10, i::text FROM generate_series(0, 1000) i;
|
||||
CREATE OR REPLACE FUNCTION simple_cursor_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR SELECT DISTINCT key FROM distributed_table ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR
|
||||
WITH cte_1 AS (SELECT * FROM distributed_table OFFSET 0)
|
||||
SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table_with_param(cursor_name refcursor, filter text) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR
|
||||
WITH cte_1 AS (SELECT * FROM distributed_table WHERE value < $2 OFFSET 0)
|
||||
SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
-- pretty basic query with cursors
-- Citus should plan/execute once and pull
-- the results to the coordinator, then serve them
-- from the coordinator
BEGIN;
|
||||
SELECT simple_cursor_on_dist_table('cursor_1');
|
||||
simple_cursor_on_dist_table
|
||||
---------------------------------------------------------------------
|
||||
cursor_1
|
||||
(1 row)
|
||||
|
||||
SET LOCAL citus.log_intermediate_results TO ON;
|
||||
SET LOCAL client_min_messages TO DEBUG1;
|
||||
FETCH 5 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
(5 rows)
|
||||
|
||||
FETCH 50 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
(5 rows)
|
||||
|
||||
FETCH ALL IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
SELECT cursor_with_intermediate_result_on_dist_table('cursor_1');
|
||||
cursor_with_intermediate_result_on_dist_table
|
||||
---------------------------------------------------------------------
|
||||
cursor_1
|
||||
(1 row)
|
||||
|
||||
-- multiple FETCH commands should not trigger re-running the subplans
|
||||
SET LOCAL citus.log_intermediate_results TO ON;
|
||||
SET LOCAL client_min_messages TO DEBUG1;
|
||||
FETCH 5 IN cursor_1;
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
(5 rows)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
FETCH ALL IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
(4 rows)
|
||||
|
||||
FETCH 5 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
SELECT cursor_with_intermediate_result_on_dist_table_with_param('cursor_1', '600');
|
||||
cursor_with_intermediate_result_on_dist_table_with_param
|
||||
---------------------------------------------------------------------
|
||||
cursor_1
|
||||
(1 row)
|
||||
|
||||
-- multiple FETCH commands should not trigger re-running the subplans
|
||||
-- also test with parameters
|
||||
SET LOCAL citus.log_intermediate_results TO ON;
|
||||
SET LOCAL client_min_messages TO DEBUG1;
|
||||
FETCH 1 IN cursor_1;
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
FETCH 1 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
FETCH ALL IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
(4 rows)
|
||||
|
||||
COMMIT;
|
||||
CREATE OR REPLACE FUNCTION value_counter() RETURNS TABLE(counter text) LANGUAGE PLPGSQL AS $function$
|
||||
BEGIN
|
||||
return query
|
||||
WITH cte AS
|
||||
(SELECT dt.value
|
||||
FROM distributed_table dt
|
||||
WHERE dt.value in
|
||||
(SELECT value
|
||||
FROM distributed_table p
|
||||
GROUP BY p.value
|
||||
HAVING count(*) > 0))
|
||||
|
||||
SELECT * FROM cte;
|
||||
END;
|
||||
$function$ ;
|
||||
SET citus.log_intermediate_results TO ON;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
\set VERBOSITY terse
|
||||
SELECT count(*) from (SELECT value_counter()) as foo;
|
||||
DEBUG: CTE cte is going to be inlined via distributed planning
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT value FROM cursors.distributed_table p GROUP BY value HAVING (count(*) OPERATOR(pg_catalog.>) 0)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value FROM (SELECT dt.value FROM cursors.distributed_table dt WHERE (dt.value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)))) cte
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
SELECT count(*) from (SELECT value_counter()) as foo;
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
COMMIT;
|
||||
-- suppress NOTICEs
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP SCHEMA cursors CASCADE;
|
|
@@ -299,7 +299,7 @@ INSERT INTO
VALUES ('3', (WITH vals AS (SELECT 3) select * from vals));
DEBUG: CTE vals is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for CTE vals: SELECT 3
DEBUG: Plan XXX query after replacing subqueries and CTEs: INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept) VALUES ('3'::text, (SELECT vals."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) vals))
DEBUG: Plan XXX query after replacing subqueries and CTEs: INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept) VALUES ('3'::text, (SELECT vals."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) vals("?column?")))
ERROR: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
INSERT INTO
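The HINT above points at the INSERT INTO ... SELECT form. One possible rewrite, shown only as a hedged sketch (it is not taken from the test file; it reuses the same recursive_dml_queries.second_distributed_table target and assumes dept is the second column):

-- Possible rewrite following the HINT: move the CTE out of the VALUES expression
-- and make it the source of an INSERT INTO ... SELECT.
WITH vals AS (SELECT 3 AS dept)
INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept)
SELECT '3', dept FROM vals;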
@@ -0,0 +1,370 @@
|
|||
\c - - - :master_port
|
||||
CREATE SCHEMA single_node;
|
||||
SET search_path TO single_node;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 93630500;
|
||||
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE test(x int, y int);
|
||||
SELECT create_distributed_table('test','x');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE ref(a int, b int);
|
||||
SELECT create_reference_table('ref');
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE local(c int, d int);
|
||||
INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5);
|
||||
INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8);
|
||||
INSERT INTO local VALUES (1, 2), (3, 4), (7, 8);
|
||||
-- Check repartition joins are supported
|
||||
SET citus.enable_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
x | y | x | y
|
||||
---------------------------------------------------------------------
|
||||
2 | 7 | 1 | 2
|
||||
4 | 5 | 3 | 4
|
||||
5 | 6 | 4 | 5
|
||||
(3 rows)
|
||||
|
||||
SET citus.enable_single_hash_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
x | y | x | y
|
||||
---------------------------------------------------------------------
|
||||
2 | 7 | 1 | 2
|
||||
4 | 5 | 3 | 4
|
||||
5 | 6 | 4 | 5
|
||||
(3 rows)
|
||||
|
||||
RESET citus.enable_single_hash_repartition_joins;
|
||||
SET citus.task_assignment_policy TO 'round-robin';
|
||||
SET citus.enable_single_hash_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
x | y | x | y
|
||||
---------------------------------------------------------------------
|
||||
2 | 7 | 1 | 2
|
||||
4 | 5 | 3 | 4
|
||||
5 | 6 | 4 | 5
|
||||
(3 rows)
|
||||
|
||||
SET citus.task_assignment_policy TO 'greedy';
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
x | y | x | y
|
||||
---------------------------------------------------------------------
|
||||
2 | 7 | 1 | 2
|
||||
4 | 5 | 3 | 4
|
||||
5 | 6 | 4 | 5
|
||||
(3 rows)
|
||||
|
||||
SET citus.task_assignment_policy TO 'first-replica';
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
x | y | x | y
|
||||
---------------------------------------------------------------------
|
||||
2 | 7 | 1 | 2
|
||||
4 | 5 | 3 | 4
|
||||
5 | 6 | 4 | 5
|
||||
(3 rows)
|
||||
|
||||
RESET citus.enable_repartition_joins;
|
||||
-- connect to the follower and check that a simple select query works; the follower
-- is still in the default cluster and will send queries to the primary nodes
|
||||
\c - - - :follower_master_port
|
||||
SET search_path TO single_node;
|
||||
SELECT * FROM test WHERE x = 1;
|
||||
x | y
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM test ORDER BY x;
|
||||
x | y
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 7
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
(5 rows)
|
||||
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM ref ORDER BY a;
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
5 | 6
|
||||
7 | 8
|
||||
(3 rows)
|
||||
|
||||
SELECT * FROM test, ref WHERE x = a ORDER BY x;
|
||||
x | y | a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | 2 | 1 | 2
|
||||
5 | 6 | 5 | 6
|
||||
(2 rows)
|
||||
|
||||
SELECT count(*) FROM local;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM local ORDER BY c;
|
||||
c | d
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
3 | 4
|
||||
7 | 8
|
||||
(3 rows)
|
||||
|
||||
SELECT * FROM ref, local WHERE a = c ORDER BY a;
|
||||
a | b | c | d
|
||||
---------------------------------------------------------------------
|
||||
1 | 2 | 1 | 2
|
||||
7 | 8 | 7 | 8
|
||||
(2 rows)
|
||||
|
||||
SET citus.enable_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
SET citus.enable_single_hash_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
SET citus.task_assignment_policy TO 'round-robin';
|
||||
SET citus.enable_single_hash_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
SET citus.task_assignment_policy TO 'greedy';
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
SET citus.task_assignment_policy TO 'first-replica';
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
RESET citus.enable_repartition_joins;
|
||||
RESET citus.enable_single_hash_repartition_joins;
|
||||
-- Confirm that dummy placements work
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- Confirm that they work with round-robin task assignment policy
|
||||
SET citus.task_assignment_policy TO 'round-robin';
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SET citus.task_assignment_policy TO 'greedy';
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SET citus.task_assignment_policy TO 'first-replica';
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
RESET citus.task_assignment_policy;
|
||||
-- now, connect to the follower but tell it to use secondary nodes. There are no
|
||||
-- secondary nodes so this should fail.
|
||||
-- (this is :follower_master_port but substitution doesn't work here)
|
||||
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
|
||||
SET search_path TO single_node;
|
||||
SELECT * FROM test WHERE x = 1;
|
||||
ERROR: node group 0 does not have a secondary node
|
||||
-- add the follower as a secondary node and try again; the SELECT statement
-- should work this time
|
||||
\c - - - :master_port
|
||||
SET search_path TO single_node;
|
||||
SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary');
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
|
||||
SET search_path TO single_node;
|
||||
SELECT * FROM test WHERE x = 1;
|
||||
x | y
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM test ORDER BY x;
|
||||
x | y
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 7
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
(5 rows)
|
||||
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM ref ORDER BY a;
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
5 | 6
|
||||
7 | 8
|
||||
(3 rows)
|
||||
|
||||
SELECT * FROM test, ref WHERE x = a ORDER BY x;
|
||||
x | y | a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | 2 | 1 | 2
|
||||
5 | 6 | 5 | 6
|
||||
(2 rows)
|
||||
|
||||
SELECT count(*) FROM local;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM local ORDER BY c;
|
||||
c | d
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
3 | 4
|
||||
7 | 8
|
||||
(3 rows)
|
||||
|
||||
SELECT * FROM ref, local WHERE a = c ORDER BY a;
|
||||
a | b | c | d
|
||||
---------------------------------------------------------------------
|
||||
1 | 2 | 1 | 2
|
||||
7 | 8 | 7 | 8
|
||||
(2 rows)
|
||||
|
||||
SET citus.enable_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
SET citus.enable_single_hash_repartition_joins TO ON;
|
||||
SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
|
||||
ERROR: writing to worker nodes is not currently allowed
|
||||
DETAIL: the database is read-only
|
||||
RESET citus.enable_repartition_joins;
|
||||
RESET citus.enable_single_hash_repartition_joins;
|
||||
-- Confirm that dummy placements work
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- Confirm that they work with round-robin task assignment policy
|
||||
SET citus.task_assignment_policy TO 'round-robin';
|
||||
SELECT count(*) FROM test WHERE false;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
RESET citus.task_assignment_policy;
|
||||
-- Cleanup
|
||||
\c - - - :master_port
|
||||
SET search_path TO single_node;
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA single_node CASCADE;
|
||||
-- Remove the coordinator again
|
||||
SELECT 1 FROM master_remove_node('localhost', :master_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- Remove the secondary coordinator again
|
||||
SELECT 1 FROM master_remove_node('localhost', :follower_master_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
|
@@ -142,6 +142,85 @@ SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_compo
|
|||
6 | (7,8)
|
||||
(1 row)
|
||||
|
||||
CREATE TYPE other_composite_type AS (
|
||||
i integer,
|
||||
i2 integer
|
||||
);
|
||||
-- Check that casts are correctly done on partition columns
|
||||
SELECT run_command_on_coordinator_and_workers($cf$
|
||||
CREATE CAST (other_composite_type AS test_composite_type) WITH INOUT AS IMPLICIT;
|
||||
$cf$);
|
||||
run_command_on_coordinator_and_workers
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
|
||||
SELECT * FROM composite_type_partitioned_table WHERE id = 123;
|
||||
id | col
|
||||
---------------------------------------------------------------------
|
||||
123 | (123,456)
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
|
||||
Task Count: 1
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Query: INSERT INTO public.composite_type_partitioned_table_530003 (id, col) VALUES (123, '(123,456)'::public.test_composite_type)
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Insert on public.composite_type_partitioned_table_530003 (actual rows=0 loops=1)
|
||||
-> Result (actual rows=1 loops=1)
|
||||
Output: 123, '(123,456)'::test_composite_type
|
||||
(9 rows)
|
||||
|
||||
SELECT run_command_on_coordinator_and_workers($cf$
|
||||
DROP CAST (other_composite_type as test_composite_type);
|
||||
$cf$);
|
||||
run_command_on_coordinator_and_workers
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_coordinator_and_workers($cf$
|
||||
CREATE FUNCTION to_test_composite_type(arg other_composite_type) RETURNS test_composite_type
|
||||
AS 'select arg::text::test_composite_type;'
|
||||
LANGUAGE SQL
|
||||
IMMUTABLE
|
||||
RETURNS NULL ON NULL INPUT;
|
||||
$cf$);
|
||||
run_command_on_coordinator_and_workers
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_coordinator_and_workers($cf$
|
||||
CREATE CAST (other_composite_type AS test_composite_type) WITH FUNCTION to_test_composite_type(other_composite_type) AS IMPLICIT;
|
||||
$cf$);
|
||||
run_command_on_coordinator_and_workers
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type);
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
|
||||
Task Count: 1
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Query: INSERT INTO public.composite_type_partitioned_table_530000 (id, col) VALUES (123, '(456,678)'::public.other_composite_type)
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Insert on public.composite_type_partitioned_table_530000 (actual rows=0 loops=1)
|
||||
-> Result (actual rows=1 loops=1)
|
||||
Output: 123, '(456,678)'::test_composite_type
|
||||
(9 rows)
|
||||
|
||||
-- create and distribute a table on an enum type column
|
||||
CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed');
|
||||
CREATE TABLE bugs (
Some files were not shown because too many files have changed in this diff.