mirror of https://github.com/citusdata/citus.git
Run linter to use the new editorconfig rules
It is not so easy to use a linter on all the rules as the tool I used fails to honor .gitignore files. It ends up checking many generated files and comes up with many false errors. I used the following commands to check rules and attempt to fix all problems: - `git ls-files | xargs eclint check` - `git ls-files | xargs eclint fix` However, eclint was not able to fix all the problems for me automatically. I used the check command output and fixed them by hand.pull/6656/head
parent
a97478a85d
commit
5862926517
|
@ -298,7 +298,7 @@ jobs:
|
|||
find src/test/regress/tmp_citus_test/ -name "regression*.diffs" -exec cat {} +
|
||||
lines=$(find src/test/regress/tmp_citus_test/ -name "regression*.diffs" | wc -l)
|
||||
if [ $lines -ne 0 ]; then
|
||||
exit 1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
when: on_fail
|
||||
|
|
2
Makefile
2
Makefile
|
@ -6,7 +6,7 @@ extension_dir = $(shell $(PG_CONFIG) --sharedir)/extension
|
|||
|
||||
# Hint that configure should be run first
|
||||
ifeq (,$(wildcard Makefile.global))
|
||||
$(error ./configure needs to be run before compiling Citus)
|
||||
$(error ./configure needs to be run before compiling Citus)
|
||||
endif
|
||||
|
||||
include Makefile.global
|
||||
|
|
|
@ -73,7 +73,7 @@ $(citus_abs_top_srcdir)/configure: $(citus_abs_top_srcdir)/configure.ac
|
|||
# be able to use a different one, especially when building against
|
||||
# distribution packages.
|
||||
ifneq (@CC@,)
|
||||
override CC=@CC@
|
||||
override CC=@CC@
|
||||
endif
|
||||
|
||||
# If detected by our configure script, override the FLEX postgres
|
||||
|
@ -81,14 +81,14 @@ endif
|
|||
# built without flex available (possible because generated files are
|
||||
# included)
|
||||
ifneq (@FLEX@,)
|
||||
override FLEX=@FLEX@
|
||||
override FLEX=@FLEX@
|
||||
endif
|
||||
|
||||
# Add options passed to configure or computed therein, to CFLAGS/CPPFLAGS/...
|
||||
override CFLAGS += @CFLAGS@ @CITUS_CFLAGS@
|
||||
override BITCODE_CFLAGS := $(BITCODE_CFLAGS) @CITUS_BITCODE_CFLAGS@
|
||||
ifneq ($(GIT_VERSION),)
|
||||
override CFLAGS += -DGIT_VERSION=\"$(GIT_VERSION)\"
|
||||
override CFLAGS += -DGIT_VERSION=\"$(GIT_VERSION)\"
|
||||
endif
|
||||
override CPPFLAGS := @CPPFLAGS@ @CITUS_CPPFLAGS@ -I '${citus_abs_top_srcdir}/src/include' -I'${citus_top_builddir}/src/include' $(CPPFLAGS)
|
||||
override LDFLAGS += @LDFLAGS@ @CITUS_LDFLAGS@
|
||||
|
|
|
@ -20,9 +20,9 @@
|
|||
},
|
||||
"license": "PostgreSQL",
|
||||
"licenseDetail": [
|
||||
"Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group",
|
||||
"",
|
||||
"Portions Copyright (c) 1994, The Regents of the University of California",
|
||||
"Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group",
|
||||
"",
|
||||
"Portions Copyright (c) 1994, The Regents of the University of California",
|
||||
"",
|
||||
"Permission to use, copy, modify, and distribute this software and its documentation for ",
|
||||
"any purpose, without fee, and without a written agreement is hereby granted, provided ",
|
||||
|
|
|
@ -25,23 +25,23 @@ basedir="$(pwd)"
|
|||
rm -rf "${basedir}/.git"
|
||||
|
||||
build_ext() {
|
||||
pg_major="$1"
|
||||
pg_major="$1"
|
||||
|
||||
builddir="${basedir}/build-${pg_major}"
|
||||
echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
|
||||
builddir="${basedir}/build-${pg_major}"
|
||||
echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
|
||||
|
||||
# do everything in a subdirectory to avoid clutter in current directory
|
||||
mkdir -p "${builddir}" && cd "${builddir}"
|
||||
# do everything in a subdirectory to avoid clutter in current directory
|
||||
mkdir -p "${builddir}" && cd "${builddir}"
|
||||
|
||||
CFLAGS=-Werror "${basedir}/configure" PG_CONFIG="/usr/lib/postgresql/${pg_major}/bin/pg_config" --enable-coverage --with-security-flags
|
||||
CFLAGS=-Werror "${basedir}/configure" PG_CONFIG="/usr/lib/postgresql/${pg_major}/bin/pg_config" --enable-coverage --with-security-flags
|
||||
|
||||
installdir="${builddir}/install"
|
||||
make -j$(nproc) && mkdir -p "${installdir}" && { make DESTDIR="${installdir}" install-all || make DESTDIR="${installdir}" install ; }
|
||||
installdir="${builddir}/install"
|
||||
make -j$(nproc) && mkdir -p "${installdir}" && { make DESTDIR="${installdir}" install-all || make DESTDIR="${installdir}" install ; }
|
||||
|
||||
cd "${installdir}" && find . -type f -print > "${builddir}/files.lst"
|
||||
tar cvf "${basedir}/install-${pg_major}.tar" `cat ${builddir}/files.lst`
|
||||
cd "${installdir}" && find . -type f -print > "${builddir}/files.lst"
|
||||
tar cvf "${basedir}/install-${pg_major}.tar" `cat ${builddir}/files.lst`
|
||||
|
||||
cd "${builddir}" && rm -rf install files.lst && make clean
|
||||
cd "${builddir}" && rm -rf install files.lst && make clean
|
||||
}
|
||||
|
||||
build_ext "${PG_MAJOR}"
|
||||
|
|
|
@ -13,7 +13,7 @@ echo_and_restore() {
|
|||
builtin echo "$*"
|
||||
#shellcheck disable=SC2154
|
||||
case "$save_flags" in
|
||||
(*x*) set -x
|
||||
(*x*) set -x
|
||||
esac
|
||||
}
|
||||
|
||||
|
@ -29,4 +29,3 @@ hint_on_fail() {
|
|||
exit $exit_code
|
||||
}
|
||||
trap hint_on_fail EXIT
|
||||
|
||||
|
|
|
@ -5,6 +5,6 @@ set -euo pipefail
|
|||
source ci/ci_helpers.sh
|
||||
|
||||
for f in $(git ls-tree -r HEAD --name-only src/test/regress/expected/*.out); do
|
||||
sed -Ef src/test/regress/bin/normalize.sed < "$f" > "$f.modified"
|
||||
mv "$f.modified" "$f"
|
||||
sed -Ef src/test/regress/bin/normalize.sed < "$f" > "$f.modified"
|
||||
mv "$f.modified" "$f"
|
||||
done
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
# Convert type and name to shell variable name (e.g., "enable_long_strings")
|
||||
m4_define([pgac_arg_to_variable],
|
||||
[$1[]_[]patsubst($2, -, _)])
|
||||
[$1[]_[]patsubst($2, -, _)])
|
||||
|
||||
|
||||
# PGAC_ARG(TYPE, NAME, HELP-STRING-LHS-EXTRA, HELP-STRING-RHS,
|
||||
|
@ -39,34 +39,34 @@ m4_case([$1],
|
|||
|
||||
enable, [
|
||||
AC_ARG_ENABLE([$2], [AS_HELP_STRING([--]m4_if($3, -, disable, enable)[-$2]m4_if($3, -, , $3), [$4])], [
|
||||
case [$]enableval in
|
||||
yes)
|
||||
m4_default([$5], :)
|
||||
;;
|
||||
no)
|
||||
m4_default([$6], :)
|
||||
;;
|
||||
*)
|
||||
$7
|
||||
;;
|
||||
esac
|
||||
case [$]enableval in
|
||||
yes)
|
||||
m4_default([$5], :)
|
||||
;;
|
||||
no)
|
||||
m4_default([$6], :)
|
||||
;;
|
||||
*)
|
||||
$7
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[$8])[]dnl AC_ARG_ENABLE
|
||||
],
|
||||
|
||||
with, [
|
||||
AC_ARG_WITH([$2], [AS_HELP_STRING([--]m4_if($3, -, without, with)[-$2]m4_if($3, -, , $3), [$4])], [
|
||||
case [$]withval in
|
||||
yes)
|
||||
m4_default([$5], :)
|
||||
;;
|
||||
no)
|
||||
m4_default([$6], :)
|
||||
;;
|
||||
*)
|
||||
$7
|
||||
;;
|
||||
esac
|
||||
case [$]withval in
|
||||
yes)
|
||||
m4_default([$5], :)
|
||||
;;
|
||||
no)
|
||||
m4_default([$6], :)
|
||||
;;
|
||||
*)
|
||||
$7
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[$8])[]dnl AC_ARG_WITH
|
||||
],
|
||||
|
@ -93,13 +93,13 @@ dnl that by making the help string look the same, which is why we need to
|
|||
dnl save the default that was passed in previously.
|
||||
m4_define([_pgac_helpdefault], m4_ifdef([pgac_defined_$1_$2_bool], [m4_defn([pgac_defined_$1_$2_bool])], [$3]))dnl
|
||||
PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6],
|
||||
[AC_MSG_ERROR([no argument expected for --$1-$2 option])],
|
||||
[m4_case([$3],
|
||||
yes, [pgac_arg_to_variable([$1], [$2])=yes
|
||||
[AC_MSG_ERROR([no argument expected for --$1-$2 option])],
|
||||
[m4_case([$3],
|
||||
yes, [pgac_arg_to_variable([$1], [$2])=yes
|
||||
$5],
|
||||
no, [pgac_arg_to_variable([$1], [$2])=no
|
||||
no, [pgac_arg_to_variable([$1], [$2])=no
|
||||
$6],
|
||||
[m4_fatal([third argument of $0 must be 'yes' or 'no', not '$3'])])])[]dnl
|
||||
[m4_fatal([third argument of $0 must be 'yes' or 'no', not '$3'])])])[]dnl
|
||||
m4_define([pgac_defined_$1_$2_bool], [$3])dnl
|
||||
])# PGAC_ARG_BOOL
|
||||
|
||||
|
@ -112,10 +112,10 @@ m4_define([pgac_defined_$1_$2_bool], [$3])dnl
|
|||
|
||||
AC_DEFUN([PGAC_ARG_REQ],
|
||||
[PGAC_ARG([$1], [$2], [=$3], [$4],
|
||||
[AC_MSG_ERROR([argument required for --$1-$2 option])],
|
||||
[AC_MSG_ERROR([argument required for --$1-$2 option])],
|
||||
[$5],
|
||||
[$6])])# PGAC_ARG_REQ
|
||||
[AC_MSG_ERROR([argument required for --$1-$2 option])],
|
||||
[AC_MSG_ERROR([argument required for --$1-$2 option])],
|
||||
[$5],
|
||||
[$6])])# PGAC_ARG_REQ
|
||||
|
||||
|
||||
# PGAC_ARG_OPTARG(TYPE, NAME, HELP-ARGNAME, HELP-STRING-RHS,
|
||||
|
@ -134,17 +134,17 @@ AC_DEFUN([PGAC_ARG_REQ],
|
|||
|
||||
AC_DEFUN([PGAC_ARG_OPTARG],
|
||||
[PGAC_ARG([$1], [$2], [@<:@=$3@:>@], [$4], [$5], [],
|
||||
[pgac_arg_to_variable([$1], [$2])=yes
|
||||
[pgac_arg_to_variable([$1], [$2])=yes
|
||||
$6],
|
||||
[pgac_arg_to_variable([$1], [$2])=no])
|
||||
[pgac_arg_to_variable([$1], [$2])=no])
|
||||
dnl Add this code only if there's a ACTION-ENABLED or ACTION-DISABLED.
|
||||
m4_ifval([$7[]$8],
|
||||
[
|
||||
if test "[$]pgac_arg_to_variable([$1], [$2])" = yes; then
|
||||
m4_default([$7], :)
|
||||
m4_default([$7], :)
|
||||
m4_ifval([$8],
|
||||
[else
|
||||
$8
|
||||
$8
|
||||
])[]dnl
|
||||
fi
|
||||
])[]dnl
|
||||
|
|
|
@ -11,11 +11,11 @@ help="\
|
|||
Usage: $me sourcetree [buildtree]"
|
||||
|
||||
if test -z "$1"; then
|
||||
echo "$help" 1>&2
|
||||
exit 1
|
||||
echo "$help" 1>&2
|
||||
exit 1
|
||||
elif test x"$1" = x"--help"; then
|
||||
echo "$help"
|
||||
exit 0
|
||||
echo "$help"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
unset CDPATH
|
||||
|
@ -29,19 +29,19 @@ buildtree=`cd ${2:-'.'} && pwd`
|
|||
# the source tree, if a VPATH build is done from a distribution tarball.
|
||||
# See bug #5595.
|
||||
for item in `find "$sourcetree" -type d \( \( -name CVS -prune \) -o \( -name .git -prune \) -o -print \) | grep -v "$sourcetree/doc/src/sgml/\+"`; do
|
||||
subdir=`expr "$item" : "$sourcetree\(.*\)"`
|
||||
if test ! -d "$buildtree/$subdir"; then
|
||||
mkdir -p "$buildtree/$subdir" || exit 1
|
||||
fi
|
||||
subdir=`expr "$item" : "$sourcetree\(.*\)"`
|
||||
if test ! -d "$buildtree/$subdir"; then
|
||||
mkdir -p "$buildtree/$subdir" || exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
for item in `find "$sourcetree" -not -path '*/.git/hg/*' \( -name Makefile -print -o -name GNUmakefile -print \)`; do
|
||||
filename=`expr "$item" : "$sourcetree\(.*\)"`
|
||||
if test ! -f "${item}.in"; then
|
||||
if cmp "$item" "$buildtree/$filename" >/dev/null 2>&1; then : ; else
|
||||
ln -fs "$item" "$buildtree/$filename" || exit 1
|
||||
fi
|
||||
fi
|
||||
filename=`expr "$item" : "$sourcetree\(.*\)"`
|
||||
if test ! -f "${item}.in"; then
|
||||
if cmp "$item" "$buildtree/$filename" >/dev/null 2>&1; then : ; else
|
||||
ln -fs "$item" "$buildtree/$filename" || exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -22,4 +22,3 @@ configure -whitespace
|
|||
|
||||
# all C files (implementation and header) use our style...
|
||||
*.[ch] citus-style
|
||||
|
||||
|
|
|
@ -28,5 +28,3 @@ ALTER EXTENSION citus_columnar ADD VIEW columnar.chunk;
|
|||
ALTER FUNCTION citus_internal.upgrade_columnar_storage(regclass) SET SCHEMA columnar_internal;
|
||||
ALTER FUNCTION citus_internal.downgrade_columnar_storage(regclass) SET SCHEMA columnar_internal;
|
||||
ALTER FUNCTION citus_internal.columnar_ensure_am_depends_catalog() SET SCHEMA columnar_internal;
|
||||
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ include $(citus_top_builddir)/Makefile.global
|
|||
CITUS_VERSION_INVALIDATE := $(filter-out utils/citus_version.o,$(OBJS))
|
||||
CITUS_VERSION_INVALIDATE += $(generated_sql_files)
|
||||
ifneq ($(wildcard $(citus_top_builddir)/.git/.*),)
|
||||
CITUS_VERSION_INVALIDATE += $(citus_top_builddir)/.git/index
|
||||
CITUS_VERSION_INVALIDATE += $(citus_top_builddir)/.git/index
|
||||
endif
|
||||
utils/citus_version.o: $(CITUS_VERSION_INVALIDATE)
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ ifeq ($(pg_major_version),13)
|
|||
else ifeq ($(pg_major_version),14)
|
||||
test_path = t_pg13_pg14/*.pl
|
||||
else
|
||||
test_path = t/*.pl
|
||||
test_path = t/*.pl
|
||||
endif
|
||||
|
||||
# copied from pgxs/Makefile.global to use postgres' abs build dir for pg_regress
|
||||
|
|
|
@ -4,4 +4,3 @@ vacuum_freeze_min_age = 50000
|
|||
vacuum_freeze_table_age = 50000
|
||||
synchronous_commit = off
|
||||
fsync = off
|
||||
|
||||
|
|
|
@ -24,17 +24,17 @@ INSERT INTO test_columnar_freeze VALUES (1);
|
|||
my $ten_thousand_updates = "";
|
||||
|
||||
foreach (1..10000) {
|
||||
$ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n";
|
||||
$ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n";
|
||||
}
|
||||
|
||||
# 70K updates
|
||||
foreach (1..7) {
|
||||
$node_one->safe_psql('postgres', $ten_thousand_updates);
|
||||
$node_one->safe_psql('postgres', $ten_thousand_updates);
|
||||
}
|
||||
|
||||
my $result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(f), 'columnar table was not frozen');
|
||||
|
@ -43,10 +43,9 @@ $node_one->safe_psql('postgres', 'VACUUM FREEZE test_columnar_freeze;');
|
|||
|
||||
$result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(t), 'columnar table was frozen');
|
||||
|
||||
$node_one->stop('fast');
|
||||
|
||||
|
|
|
@ -24,17 +24,17 @@ INSERT INTO test_columnar_freeze VALUES (1);
|
|||
my $ten_thousand_updates = "";
|
||||
|
||||
foreach (1..10000) {
|
||||
$ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n";
|
||||
$ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n";
|
||||
}
|
||||
|
||||
# 70K updates
|
||||
foreach (1..7) {
|
||||
$node_one->safe_psql('postgres', $ten_thousand_updates);
|
||||
$node_one->safe_psql('postgres', $ten_thousand_updates);
|
||||
}
|
||||
|
||||
my $result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(f), 'columnar table was not frozen');
|
||||
|
@ -43,10 +43,9 @@ $node_one->safe_psql('postgres', 'VACUUM FREEZE test_columnar_freeze;');
|
|||
|
||||
$result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(t), 'columnar table was frozen');
|
||||
|
||||
$node_one->stop('fast');
|
||||
|
||||
|
|
|
@ -67,5 +67,3 @@ fi
|
|||
|
||||
# create cluster and run the hammerd benchmark
|
||||
./create-run.sh
|
||||
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ ifeq ($(pg_major_version),13)
|
|||
else ifeq ($(pg_major_version),14)
|
||||
test_path = t_pg13_pg14/*.pl
|
||||
else
|
||||
test_path = t/*.pl
|
||||
test_path = t/*.pl
|
||||
endif
|
||||
|
||||
# copied from pgxs/Makefile.global to use postgres' abs build dir for pg_regress
|
||||
|
|
|
@ -292,4 +292,3 @@ clean distclean maintainer-clean:
|
|||
rm -rf input/ output/
|
||||
rm -rf tmp_check/
|
||||
rm -rf tmp_citus_test/
|
||||
|
||||
|
|
|
@ -51,4 +51,3 @@ then
|
|||
else
|
||||
exec "$DIFF" -w $args "$file1" "$file2"
|
||||
fi
|
||||
|
||||
|
|
|
@ -214,9 +214,9 @@ s/^(ERROR: child table is missing constraint "\w+)_([0-9])+"/\1_xxxxxx"/g
|
|||
# session. Sometimes happens that deadlock detector cancels the session before
|
||||
# lock detection, so we normalize it by removing these two lines.
|
||||
/^ <waiting ...>$/ {
|
||||
N; /\nstep s1-update-2: <... completed>$/ {
|
||||
s/.*//g
|
||||
}
|
||||
N; /\nstep s1-update-2: <... completed>$/ {
|
||||
s/.*//g
|
||||
}
|
||||
}
|
||||
|
||||
# normalize long table shard name errors for alter_table_set_access_method and alter_distributed_table
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -229,7 +229,7 @@ class CitusSmallSharedPoolSizeConfig(CitusDefaultClusterConfig):
|
|||
def __init__(self, arguments):
|
||||
super().__init__(arguments)
|
||||
self.new_settings = {
|
||||
"citus.local_shared_pool_size": 5,
|
||||
"citus.local_shared_pool_size": 5,
|
||||
"citus.max_shared_pool_size": 5,
|
||||
}
|
||||
|
||||
|
@ -308,7 +308,7 @@ class CitusUnusualQuerySettingsConfig(CitusDefaultClusterConfig):
|
|||
# As of c11, there is no way to do that through remote execution so this test
|
||||
# will fail
|
||||
"arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade",
|
||||
# Alter Table statement cannot be run from an arbitrary node so this test will fail
|
||||
# Alter Table statement cannot be run from an arbitrary node so this test will fail
|
||||
"arbitrary_configs_alter_table_add_constraint_without_name_create", "arbitrary_configs_alter_table_add_constraint_without_name"]
|
||||
|
||||
class CitusSingleNodeSingleShardClusterConfig(CitusDefaultClusterConfig):
|
||||
|
@ -335,7 +335,7 @@ class CitusShardReplicationFactorClusterConfig(CitusDefaultClusterConfig):
|
|||
# citus does not support colocating functions with distributed tables when
|
||||
# citus.shard_replication_factor >= 2
|
||||
"function_create", "functions",
|
||||
# Alter Table statement cannot be run from an arbitrary node so this test will fail
|
||||
# Alter Table statement cannot be run from an arbitrary node so this test will fail
|
||||
"arbitrary_configs_alter_table_add_constraint_without_name_create", "arbitrary_configs_alter_table_add_constraint_without_name"]
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
|
File diff suppressed because it is too large
Load Diff
|
@ -2,55 +2,55 @@
|
|||
|
||||
setup
|
||||
{
|
||||
SELECT citus_set_coordinator_host('localhost', 57636);
|
||||
SELECT citus_set_coordinator_host('localhost', 57636);
|
||||
|
||||
CREATE TABLE dist_table(a int);
|
||||
CREATE TABLE citus_local_table(a int);
|
||||
CREATE TABLE local_table(a int);
|
||||
CREATE TABLE ref_table(a int);
|
||||
CREATE TABLE dist_table(a int);
|
||||
CREATE TABLE citus_local_table(a int);
|
||||
CREATE TABLE local_table(a int);
|
||||
CREATE TABLE ref_table(a int);
|
||||
|
||||
CREATE TABLE partitioned_table(a int)
|
||||
PARTITION BY RANGE(a);
|
||||
CREATE TABLE partitioned_table(a int)
|
||||
PARTITION BY RANGE(a);
|
||||
|
||||
CREATE TABLE partition_1 PARTITION OF partitioned_table
|
||||
FOR VALUES FROM (1) TO (11);
|
||||
CREATE TABLE partition_1 PARTITION OF partitioned_table
|
||||
FOR VALUES FROM (1) TO (11);
|
||||
|
||||
CREATE TABLE partition_2 PARTITION OF partitioned_table
|
||||
FOR VALUES FROM (11) TO (21);
|
||||
CREATE TABLE partition_2 PARTITION OF partitioned_table
|
||||
FOR VALUES FROM (11) TO (21);
|
||||
|
||||
SELECT create_distributed_table('dist_table', 'a');
|
||||
SELECT create_reference_table('ref_table');
|
||||
SELECT citus_add_local_table_to_metadata('citus_local_table');
|
||||
SELECT create_distributed_table('partitioned_table', 'a');
|
||||
SELECT create_distributed_table('dist_table', 'a');
|
||||
SELECT create_reference_table('ref_table');
|
||||
SELECT citus_add_local_table_to_metadata('citus_local_table');
|
||||
SELECT create_distributed_table('partitioned_table', 'a');
|
||||
|
||||
CREATE VIEW sub_view(a) AS
|
||||
SELECT 2 * a AS a
|
||||
FROM ref_table;
|
||||
CREATE VIEW sub_view(a) AS
|
||||
SELECT 2 * a AS a
|
||||
FROM ref_table;
|
||||
|
||||
CREATE VIEW main_view AS
|
||||
SELECT t1.a a1, t2.a a2, t3.a a3
|
||||
FROM dist_table t1
|
||||
JOIN citus_local_table t2 ON t1.a = t2.a
|
||||
JOIN sub_view t3 ON t2.a = t3.a;
|
||||
CREATE VIEW main_view AS
|
||||
SELECT t1.a a1, t2.a a2, t3.a a3
|
||||
FROM dist_table t1
|
||||
JOIN citus_local_table t2 ON t1.a = t2.a
|
||||
JOIN sub_view t3 ON t2.a = t3.a;
|
||||
|
||||
INSERT INTO dist_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO citus_local_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO local_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO ref_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO partitioned_table SELECT n FROM generate_series(8, 12) n;
|
||||
INSERT INTO dist_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO citus_local_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO local_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO ref_table SELECT n FROM generate_series(1, 5) n;
|
||||
INSERT INTO partitioned_table SELECT n FROM generate_series(8, 12) n;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP VIEW main_view;
|
||||
DROP VIEW sub_view;
|
||||
DROP TABLE dist_table;
|
||||
DROP TABLE citus_local_table;
|
||||
DROP TABLE local_table;
|
||||
DROP TABLE ref_table;
|
||||
DROP TABLE partitioned_table;
|
||||
DROP VIEW main_view;
|
||||
DROP VIEW sub_view;
|
||||
DROP TABLE dist_table;
|
||||
DROP TABLE citus_local_table;
|
||||
DROP TABLE local_table;
|
||||
DROP TABLE ref_table;
|
||||
DROP TABLE partitioned_table;
|
||||
|
||||
SELECT citus_remove_node('localhost', 57636);
|
||||
SELECT citus_remove_node('localhost', 57636);
|
||||
}
|
||||
|
||||
// coordinator session
|
||||
|
@ -58,77 +58,77 @@ session "coor"
|
|||
|
||||
step "coor-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-dist-table"
|
||||
{
|
||||
LOCK dist_table IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK dist_table IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-dist-table-nowait"
|
||||
{
|
||||
LOCK dist_table IN ACCESS EXCLUSIVE MODE NOWAIT;
|
||||
LOCK dist_table IN ACCESS EXCLUSIVE MODE NOWAIT;
|
||||
}
|
||||
|
||||
step "coor-acquire-weak-lock-on-dist-table"
|
||||
{
|
||||
LOCK dist_table IN ACCESS SHARE MODE;
|
||||
LOCK dist_table IN ACCESS SHARE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-view"
|
||||
{
|
||||
LOCK main_view IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK main_view IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-only-view"
|
||||
{
|
||||
LOCK ONLY main_view IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK ONLY main_view IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-view-nowait"
|
||||
{
|
||||
LOCK main_view IN ACCESS EXCLUSIVE MODE NOWAIT;
|
||||
LOCK main_view IN ACCESS EXCLUSIVE MODE NOWAIT;
|
||||
}
|
||||
|
||||
step "coor-lock-all"
|
||||
{
|
||||
LOCK dist_table, citus_local_table, ref_table, main_view, sub_view, local_table IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK dist_table, citus_local_table, ref_table, main_view, sub_view, local_table IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-read-dist-table"
|
||||
{
|
||||
SELECT COUNT(*) FROM dist_table;
|
||||
SELECT COUNT(*) FROM dist_table;
|
||||
}
|
||||
|
||||
step "coor-read-ref-table"
|
||||
{
|
||||
SELECT COUNT(*) FROM ref_table;
|
||||
SELECT COUNT(*) FROM ref_table;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-partitioned-table"
|
||||
{
|
||||
LOCK partitioned_table IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK partitioned_table IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-partitioned-table-with-*-syntax"
|
||||
{
|
||||
LOCK partitioned_table * IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK partitioned_table * IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-only-partitioned-table"
|
||||
{
|
||||
LOCK ONLY partitioned_table IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK ONLY partitioned_table IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-acquire-aggresive-lock-on-ref-table"
|
||||
{
|
||||
LOCK ref_table IN ACCESS EXCLUSIVE MODE;
|
||||
LOCK ref_table IN ACCESS EXCLUSIVE MODE;
|
||||
}
|
||||
|
||||
step "coor-rollback"
|
||||
{
|
||||
ROLLBACK;
|
||||
ROLLBACK;
|
||||
}
|
||||
|
||||
// worker 1 xact session
|
||||
|
@ -141,56 +141,56 @@ step "w1-start-session-level-connection"
|
|||
|
||||
step "w1-begin"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "w1-read-dist-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table');
|
||||
}
|
||||
|
||||
step "w1-read-ref-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM ref_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM ref_table');
|
||||
}
|
||||
|
||||
step "w1-read-citus-local-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM citus_local_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM citus_local_table');
|
||||
}
|
||||
|
||||
step "w1-acquire-aggressive-lock-dist-table" {
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE');
|
||||
}
|
||||
|
||||
step "w1-lock-reference-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK ref_table IN ACCESS EXCLUSIVE MODE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK ref_table IN ACCESS EXCLUSIVE MODE');
|
||||
}
|
||||
|
||||
step "w1-read-partitioned-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partitioned_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partitioned_table');
|
||||
}
|
||||
|
||||
step "w1-read-partition-of-partitioned-table"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1');
|
||||
}
|
||||
|
||||
step "w1-read-main-view"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM main_view');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM main_view');
|
||||
}
|
||||
|
||||
step "w1-rollback"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
}
|
||||
|
||||
step "w1-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
// worker 2 xact session
|
||||
|
@ -203,21 +203,21 @@ step "w2-start-session-level-connection"
|
|||
|
||||
step "w2-begin"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "w2-acquire-aggressive-lock-dist-table" {
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE');
|
||||
}
|
||||
|
||||
step "w2-rollback"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
}
|
||||
|
||||
step "w2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
permutation "coor-begin" "coor-acquire-aggresive-lock-on-dist-table" "w1-start-session-level-connection" "w1-begin" "w1-read-dist-table" "coor-rollback" "w1-rollback" "w1-stop-connection"
|
||||
|
|
|
@ -6,7 +6,7 @@ setup
|
|||
|
||||
teardown
|
||||
{
|
||||
SELECT 1;
|
||||
SELECT 1;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
|
|
@ -2,24 +2,24 @@
|
|||
// add single one of the nodes for the purpose of the test
|
||||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor to 1;
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SET citus.shard_replication_factor to 1;
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
CREATE TABLE test_reference_table (test_id integer);
|
||||
CREATE TABLE test_reference_table_2 (test_id integer);
|
||||
INSERT INTO test_reference_table_2 VALUES (8);
|
||||
SELECT create_reference_table('test_reference_table');
|
||||
CREATE TABLE test_table (x int, y int);
|
||||
SELECT create_distributed_table('test_table','x');
|
||||
CREATE TABLE test_reference_table (test_id integer);
|
||||
CREATE TABLE test_reference_table_2 (test_id integer);
|
||||
INSERT INTO test_reference_table_2 VALUES (8);
|
||||
SELECT create_reference_table('test_reference_table');
|
||||
CREATE TABLE test_table (x int, y int);
|
||||
SELECT create_distributed_table('test_table','x');
|
||||
}
|
||||
|
||||
// ensure neither node's added for the remaining of the isolation tests
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS test_reference_table;
|
||||
DROP TABLE IF EXISTS test_reference_table_2;
|
||||
DROP TABLE IF EXISTS test_table;
|
||||
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
|
||||
DROP TABLE IF EXISTS test_reference_table;
|
||||
DROP TABLE IF EXISTS test_reference_table_2;
|
||||
DROP TABLE IF EXISTS test_table;
|
||||
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
@ -31,12 +31,12 @@ step "s1-begin"
|
|||
|
||||
step "s1-add-second-worker"
|
||||
{
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s1-drop-reference-table"
|
||||
{
|
||||
DROP TABLE test_reference_table;
|
||||
DROP TABLE test_reference_table;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
|
@ -50,74 +50,74 @@ session "s2"
|
|||
// loading the cache
|
||||
step "s2-load-metadata-cache"
|
||||
{
|
||||
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
|
||||
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
|
||||
}
|
||||
|
||||
step "s2-copy-to-reference-table"
|
||||
{
|
||||
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
|
||||
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
|
||||
}
|
||||
|
||||
step "s2-replicate-reference-tables"
|
||||
{
|
||||
SET client_min_messages TO DEBUG2;
|
||||
SELECT replicate_reference_tables();
|
||||
RESET client_min_messages;
|
||||
SET client_min_messages TO DEBUG2;
|
||||
SELECT replicate_reference_tables();
|
||||
RESET client_min_messages;
|
||||
}
|
||||
|
||||
step "s2-insert-to-reference-table"
|
||||
{
|
||||
INSERT INTO test_reference_table VALUES (6);
|
||||
INSERT INTO test_reference_table VALUES (6);
|
||||
}
|
||||
|
||||
step "s2-ddl-on-reference-table"
|
||||
{
|
||||
CREATE INDEX reference_index ON test_reference_table(test_id);
|
||||
CREATE INDEX reference_index ON test_reference_table(test_id);
|
||||
}
|
||||
|
||||
step "s2-create-reference-table-2"
|
||||
{
|
||||
SELECT create_reference_table('test_reference_table_2');
|
||||
SELECT create_reference_table('test_reference_table_2');
|
||||
}
|
||||
|
||||
step "s2-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s2-print-content"
|
||||
{
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table', 'select count(*) from %s')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table', 'select count(*) from %s')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
}
|
||||
|
||||
step "s2-print-content-2"
|
||||
{
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
}
|
||||
|
||||
step "s2-print-index-count"
|
||||
{
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
SELECT
|
||||
nodeport, success, result
|
||||
FROM
|
||||
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
|
||||
ORDER BY
|
||||
nodeport;
|
||||
}
|
||||
|
||||
// verify that copy/insert gets the invalidation and re-builts its metadata cache
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
setup
|
||||
{
|
||||
SELECT 1;
|
||||
CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
|
||||
SELECT 1;
|
||||
CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
|
@ -9,93 +9,93 @@ setup
|
|||
|
||||
teardown
|
||||
{
|
||||
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
|
||||
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-add-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-add-node-2"
|
||||
{
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s1-add-inactive-1"
|
||||
{
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-activate-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-disable-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
}
|
||||
|
||||
step "s1-remove-node-1"
|
||||
{
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-abort"
|
||||
{
|
||||
ABORT;
|
||||
ABORT;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-show-nodes"
|
||||
{
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-add-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s2-add-node-2"
|
||||
{
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-activate-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s2-disable-node-1"
|
||||
{
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
}
|
||||
|
||||
step "s2-remove-node-1"
|
||||
{
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
}
|
||||
|
||||
step "s2-remove-node-2"
|
||||
{
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
}
|
||||
|
||||
// session 1 adds a node, session 2 removes it, should be ok
|
||||
|
|
|
@ -3,20 +3,20 @@
|
|||
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -24,22 +24,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
|
||||
}
|
||||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -56,7 +56,7 @@ session "s2"
|
|||
|
||||
step "s2-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
|
@ -88,22 +88,22 @@ step "s2-upsert"
|
|||
|
||||
step "s2-copy"
|
||||
{
|
||||
COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV;
|
||||
COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV;
|
||||
}
|
||||
|
||||
step "s2-truncate"
|
||||
{
|
||||
TRUNCATE logical_replicate_placement;
|
||||
TRUNCATE logical_replicate_placement;
|
||||
}
|
||||
|
||||
step "s2-alter-table"
|
||||
{
|
||||
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
|
||||
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
|
||||
}
|
||||
|
||||
step "s2-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
permutation "s1-begin" "s2-begin" "s2-insert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
|
@ -114,4 +114,3 @@ permutation "s1-insert" "s1-begin" "s2-begin" "s2-select" "s1-move-placement" "s
|
|||
permutation "s1-begin" "s2-begin" "s2-copy" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
permutation "s1-insert" "s1-begin" "s2-begin" "s2-truncate" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
permutation "s1-begin" "s2-begin" "s2-alter-table" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
|
||||
|
|
|
@ -3,48 +3,48 @@
|
|||
|
||||
setup
|
||||
{
|
||||
SET citus.enable_metadata_sync TO off;
|
||||
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$start_session_level_connection_to_node$$;
|
||||
SET citus.enable_metadata_sync TO off;
|
||||
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$start_session_level_connection_to_node$$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
|
||||
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$stop_session_level_connection_to_node$$;
|
||||
RESET citus.enable_metadata_sync;
|
||||
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$stop_session_level_connection_to_node$$;
|
||||
RESET citus.enable_metadata_sync;
|
||||
|
||||
-- start_metadata_sync_to_node can not be run inside a transaction block
|
||||
-- following is a workaround to overcome that
|
||||
-- port numbers are hard coded at the moment
|
||||
SELECT master_run_on_worker(
|
||||
ARRAY['localhost']::text[],
|
||||
ARRAY[57636]::int[],
|
||||
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
|
||||
false)
|
||||
FROM pg_dist_node;
|
||||
-- start_metadata_sync_to_node can not be run inside a transaction block
|
||||
-- following is a workaround to overcome that
|
||||
-- port numbers are hard coded at the moment
|
||||
SELECT master_run_on_worker(
|
||||
ARRAY['localhost']::text[],
|
||||
ARRAY[57636]::int[],
|
||||
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
|
||||
false)
|
||||
FROM pg_dist_node;
|
||||
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -52,22 +52,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -84,12 +84,12 @@ session "s2"
|
|||
|
||||
step "s2-start-session-level-connection"
|
||||
{
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-begin-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
|
@ -114,16 +114,15 @@ step "s2-update"
|
|||
|
||||
step "s2-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
permutation "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
|
||||
|
|
|
@ -2,19 +2,19 @@
|
|||
// so setting the corresponding shard here is useful
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -22,7 +22,7 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
|
@ -32,12 +32,12 @@ step "s1-move-placement"
|
|||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -47,7 +47,7 @@ step "s1-insert"
|
|||
|
||||
step "s1-get-shard-distribution"
|
||||
{
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
@ -91,7 +91,7 @@ step "s2-upsert"
|
|||
|
||||
step "s2-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
permutation "s1-begin" "s2-begin" "s2-insert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
|
@ -100,4 +100,3 @@ permutation "s1-insert" "s1-begin" "s2-begin" "s2-update" "s1-move-placement" "s
|
|||
permutation "s1-insert" "s1-begin" "s2-begin" "s2-delete" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
permutation "s1-insert" "s1-begin" "s2-begin" "s2-select" "s1-move-placement" "s2-end" "s1-end" "s1-get-shard-distribution"
|
||||
permutation "s1-insert" "s1-begin" "s2-begin" "s2-select-for-update" "s1-move-placement" "s2-end" "s1-end" "s1-get-shard-distribution"
|
||||
|
||||
|
|
|
@ -2,47 +2,47 @@
|
|||
// so setting the corresponding shard here is useful
|
||||
setup
|
||||
{
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$start_session_level_connection_to_node$$;
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$start_session_level_connection_to_node$$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
|
||||
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$stop_session_level_connection_to_node$$;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT VOLATILE
|
||||
AS 'citus', $$stop_session_level_connection_to_node$$;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
||||
-- start_metadata_sync_to_node can not be run inside a transaction block
|
||||
-- following is a workaround to overcome that
|
||||
-- port numbers are hard coded at the moment
|
||||
SELECT master_run_on_worker(
|
||||
ARRAY['localhost']::text[],
|
||||
ARRAY[57636]::int[],
|
||||
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
|
||||
false)
|
||||
FROM pg_dist_node;
|
||||
-- start_metadata_sync_to_node can not be run inside a transaction block
|
||||
-- following is a workaround to overcome that
|
||||
-- port numbers are hard coded at the moment
|
||||
SELECT master_run_on_worker(
|
||||
ARRAY['localhost']::text[],
|
||||
ARRAY[57636]::int[],
|
||||
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
|
||||
false)
|
||||
FROM pg_dist_node;
|
||||
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
|
||||
SET citus.shard_count TO 8;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -50,79 +50,79 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
{
|
||||
INSERT INTO logical_replicate_placement VALUES (15, 15);
|
||||
INSERT INTO logical_replicate_placement VALUES (15, 15);
|
||||
}
|
||||
|
||||
step "s1-get-shard-distribution"
|
||||
{
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-start-session-level-connection"
|
||||
{
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-begin-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y');
|
||||
}
|
||||
|
||||
step "s2-insert"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)');
|
||||
}
|
||||
|
||||
step "s2-select-for-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE');
|
||||
}
|
||||
|
||||
step "s2-delete"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15');
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15');
|
||||
}
|
||||
|
||||
step "s2-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15');
|
||||
}
|
||||
|
||||
step "s2-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
permutation "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
|
@ -130,4 +130,3 @@ permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin
|
|||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"

@@ -2,25 +2,25 @@

setup
{
    SET citus.shard_count to 2;
    SET citus.shard_replication_factor to 1;
    SELECT setval('pg_dist_shardid_seq', 1500000);

    -- Cleanup any orphan shards that might be left over from a previous run.
    CREATE OR REPLACE FUNCTION run_try_drop_marked_resources()
    RETURNS VOID
    AS 'citus'
    LANGUAGE C STRICT VOLATILE;

    CREATE TABLE to_split_table (id int, value int);
    SELECT create_distributed_table('to_split_table', 'id');
}

teardown
{
    SELECT run_try_drop_marked_resources();

    DROP TABLE to_split_table;
}

session "s1"

@@ -37,51 +37,51 @@ step "s1-begin"

// cache all placements
step "s1-load-cache"
{
    -- Indirect way to load cache.
    TRUNCATE to_split_table;
}

step "s1-insert"
{
    -- Id '123456789' maps to shard 1500002.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);

    INSERT INTO to_split_table VALUES (123456789, 1);
}

step "s1-update"
{
    UPDATE to_split_table SET value = 111 WHERE id = 123456789;
}

step "s1-delete"
{
    DELETE FROM to_split_table WHERE id = 123456789;
}

step "s1-select"
{
    SELECT count(*) FROM to_split_table WHERE id = 123456789;
}

step "s1-ddl"
{
    CREATE INDEX test_table_index ON to_split_table(id);
}

step "s1-copy"
{
    COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}

step "s1-lock-to-split-shard"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN; LOCK TABLE to_split_table_1500002 IN ACCESS SHARE MODE;');
}

step "s1-start-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57638);
}

step "s1-stop-connection"

@@ -104,88 +104,88 @@ step "s1-release-split-advisory-lock"

step "s1-run-cleaner"
{
    SELECT run_try_drop_marked_resources();
}

step "s1-show-pg_dist_cleanup"
{
    SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
}

step "s1-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'block_writes');
}
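
// A rough sketch of the arithmetic behind this call (assuming the usual
// hash-partitioned layout created above with citus.shard_count = 2):
// shard 1500001 should cover hash values [-2147483648, -1], so the single
// split point -1073741824 produces two children, roughly
//   [-2147483648, -1073741824]
//   [-1073741823, -1]
// placed on the two nodes given in ARRAY[1, 2] in the call above.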

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-print-locks"
{
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57638]::int[],
        ARRAY[
            'SELECT CONCAT(relation::regclass, ''-'', locktype, ''-'', mode) AS LockInfo FROM pg_locks
             WHERE relation::regclass::text = ''to_split_table_1500002'';'
        ]::text[],
        false);
}

step "s2-show-pg_dist_cleanup"
{
    SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
}

step "s2-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;
}

step "s2-print-index-count"
{
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
        nodeport;
}

// Run shard split while concurrently performing DML and index creation


@@ -2,26 +2,26 @@ setup
{
    SELECT setval('pg_dist_shardid_seq', 1500000);
    SET citus.shard_count to 2;
    SET citus.shard_replication_factor to 1;

    CREATE TABLE reference_table (id int PRIMARY KEY, value int);
    SELECT create_reference_table('reference_table');

    CREATE TABLE table_to_split (id int, value int);
    SELECT create_distributed_table('table_to_split', 'id');
}

teardown
{
    -- Cleanup any orphan shards that might be left over from a previous run.
    CREATE OR REPLACE FUNCTION run_try_drop_marked_resources()
    RETURNS VOID
    AS 'citus'
    LANGUAGE C STRICT VOLATILE;
    SELECT run_try_drop_marked_resources();

    DROP TABLE table_to_split CASCADE;
    DROP TABLE reference_table CASCADE;
}

session "s1"

@@ -33,72 +33,72 @@ step "s1-begin"

step "s1-insert"
{
    INSERT INTO reference_table VALUES (5, 10);
}

step "s1-update"
{
    UPDATE reference_table SET value = 5 WHERE id = 5;
}

step "s1-delete"
{
    DELETE FROM reference_table WHERE id = 5;
}

step "s1-ddl"
{
    CREATE INDEX reference_table_index ON reference_table(id);
}

step "s1-copy"
{
    COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'block_writes');
}

step "s2-add-fkey"
{
    ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('table_to_split', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    -- rows
    SELECT id, value FROM table_to_split ORDER BY id, value;
}

// Run shard split while concurrently performing a DML and index creation on the

@@ -44,17 +44,17 @@ step "s1-alter-table"

step "s1-select"
{
    SELECT count(*) FROM test_table;
}

step "s1-select-router"
{
    SELECT count(*) FROM test_table WHERE column1 = 55;
}

step "s1-insert"
{
    INSERT INTO test_table VALUES (100, 100);
}

step "s1-commit"

@@ -66,39 +66,39 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-rollback"
{
    ROLLBACK;
}

step "s2-sleep"
{
    SELECT pg_sleep(0.5);
}

step "s2-view-dist"
{
    SELECT query, citus_nodename_for_nodeid(citus_nodeid_for_gpid(global_pid)), citus_nodeport_for_nodeid(citus_nodeid_for_gpid(global_pid)), state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE ALL(VALUES('%pg_prepared_xacts%'), ('%COMMIT%'), ('%BEGIN%'), ('%pg_catalog.pg_isolation_test_session_is_blocked%'), ('%citus_add_node%'), ('%csa_from_one_node%')) AND backend_type = 'client backend' ORDER BY query DESC;
}

session "s3"

step "s3-begin"
{
    BEGIN;
}

step "s3-rollback"
{
    ROLLBACK;
}

step "s3-view-worker"
{
    SELECT query, citus_nodename_for_nodeid(citus_nodeid_for_gpid(global_pid)), citus_nodeport_for_nodeid(citus_nodeid_for_gpid(global_pid)), state, wait_event_type, wait_event, usename, datname FROM citus_stat_activity WHERE query NOT ILIKE ALL(VALUES('%pg_prepared_xacts%'), ('%COMMIT%'), ('%csa_from_one_node%')) AND is_worker_query = true AND backend_type = 'client backend' ORDER BY query DESC;
}

// we prefer to sleep before "s2-view-dist" so that we can ensure


@@ -18,32 +18,32 @@ session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-alter-dist-table"
{
    ALTER TABLE dist_table ADD COLUMN data text;
}

step "s1-record-gpid"
{
    SELECT citus_backend_gpid() INTO selected_gpid;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-show-locks"
{
    SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode
    FROM citus_locks
    WHERE global_pid IN (SELECT * FROM selected_gpid) AND relation_name LIKE 'dist_table%'
    ORDER BY 1, 2, 3, 4;
}

permutation "s1-record-gpid" "s1-begin" "s2-show-locks" "s1-alter-dist-table" "s2-show-locks" "s1-commit" "s2-show-locks"

@@ -1,7 +1,7 @@
setup
{
    CREATE TABLE test_concurrent_dml (test_id integer NOT NULL, data text);
    SET citus.shard_replication_factor TO 2;
    SELECT create_distributed_table('test_concurrent_dml', 'test_id', 'hash', shard_count:=4);
}


@@ -1,21 +1,21 @@
setup
{
    CREATE TABLE concurrent_table_1(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_2(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_3(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_4(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_5(id int PRIMARY KEY);

    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('concurrent_table_1', 'id', colocate_with := 'none');
    SELECT create_distributed_table('concurrent_table_4', 'id');

    SELECT nodeid INTO first_node_id FROM pg_dist_node WHERE nodeport = 57637;
}

teardown
{
    DROP TABLE concurrent_table_1, concurrent_table_2, concurrent_table_3, concurrent_table_4, concurrent_table_5, first_node_id CASCADE;
}

session "s1"

@@ -23,21 +23,21 @@ session "s1"

step "s1-move-shard-logical"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
}

step "s1-move-shard-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
}

step "s1-split-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_split_shard_by_split_points(
        shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid;
}


@@ -45,44 +45,44 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-create_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

step "s3-create_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1');
}

step "s3-sanity-check"
{
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
}

step "s3-sanity-check-2"
{
    SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
}

step "s3-sanity-check-3"
{
    SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
}

step "s3-sanity-check-4"
{
    SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
}


@@ -90,36 +90,36 @@ session "s4"

step "s4-begin"
{
    BEGIN;
}

step "s4-commit"
{
    commit;
}

step "s4-move-shard-logical"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
}

step "s4-move-shard-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
}

session "s5"

step "s5-setup-rep-factor"
{
    SET citus.shard_replication_factor TO 1;
}

step "s5-create_implicit_colocated_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_5', 'id');
}


@@ -134,4 +134,3 @@ permutation "s2-begin" "s2-create_distributed_table" "s1-split-block" "s2-commi

// same test above, but this time implicitly colocated tables
permutation "s4-begin" "s4-move-shard-logical" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4"
permutation "s4-begin" "s4-move-shard-block" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4"


@@ -2,18 +2,18 @@
// so setting the corresponding shard here is useful
setup
{
    SET citus.shard_count TO 2;
    SET citus.shard_replication_factor TO 2;
    CREATE TABLE test_hash_table (x int, y int);
    SELECT create_distributed_table('test_hash_table', 'x');

    SELECT get_shard_id_for_distribution_column('test_hash_table', 5) INTO selected_shard_for_test_table;
}

teardown
{
    DROP TABLE test_hash_table;
    DROP TABLE selected_shard_for_test_table;
}

session "s1"

@@ -23,29 +23,29 @@ session "s1"

// but with copy all placements are cached
step "s1-load-cache"
{
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
}

step "s1-repair-placement"
{
    SELECT citus_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-delete-inactive"
{
    DELETE FROM pg_dist_shard_placement WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
}

step "s2-repair-placement"
{
    SELECT citus_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, transfer_mode := 'block_writes');
}
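
// A minimal sketch of the repair flow exercised here (under the assumption
// that shard_replication_factor = 2 from the setup above): "s2-delete-inactive"
// removes one placement row from pg_dist_shard_placement, and
// citus_copy_shard_placement then re-creates that placement by copying the
// data from the surviving replica; 'block_writes' copies via COPY while
// blocking writes instead of using logical replication.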

// since test_hash_table has rep > 1 simple select query doesn't hit all placements

@@ -53,12 +53,12 @@ step "s2-repair-placement"

// but with copy all placements are cached
step "s2-load-cache"
{
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
}

step "s2-commit"
{
    COMMIT;
}

// two concurrent shard repairs on the same shard


@@ -2,18 +2,18 @@
// so setting the corresponding shard here is useful
setup
{
    SET citus.shard_count TO 2;
    SET citus.shard_replication_factor TO 2;
    CREATE TABLE test_repair_placement_vs_modification (x int, y int);
    SELECT create_distributed_table('test_repair_placement_vs_modification', 'x');

    SELECT get_shard_id_for_distribution_column('test_repair_placement_vs_modification', 5) INTO selected_shard;
}

teardown
{
    DROP TABLE test_repair_placement_vs_modification;
    DROP TABLE selected_shard;
}

session "s1"

@@ -21,93 +21,93 @@ session "s1"
step "s1-begin"
{
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;
}

// since test_repair_placement_vs_modification has rep > 1 simple select query doesn't hit all placements
// hence not all placements are cached
step "s1-load-cache"
{
    TRUNCATE test_repair_placement_vs_modification;
}

step "s1-insert"
{
    INSERT INTO test_repair_placement_vs_modification VALUES (5, 10);
}

step "s1-update"
{
    UPDATE test_repair_placement_vs_modification SET y = 5 WHERE x = 5;
}

step "s1-delete"
{
    DELETE FROM test_repair_placement_vs_modification WHERE x = 5;
}

step "s1-select"
{
    SELECT count(*) FROM test_repair_placement_vs_modification WHERE x = 5;
}

step "s1-ddl"
{
    CREATE INDEX test_repair_placement_vs_modification_index ON test_repair_placement_vs_modification(x);
}

step "s1-copy"
{
    COPY test_repair_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-delete-inactive"
{
    DELETE FROM pg_dist_shard_placement WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
}

step "s2-repair-placement"
{
    SELECT citus_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, transfer_mode := 'block_writes');
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-content"
{
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('test_repair_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
        shardid IN (SELECT * FROM selected_shard)
    ORDER BY
        nodeport;
}

step "s2-print-index-count"
{
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('test_repair_placement_vs_modification', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
        nodeport;
}

// repair a placement while concurrently performing an update/delete/insert/copy


@@ -2,9 +2,9 @@

setup
{
    CREATE TABLE copy_table(id integer, value integer);
    SELECT create_distributed_table('copy_table', 'id');
    COPY copy_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
}

// Create and use UDF to close the connection opened in the setup step. Also return the cluster

@@ -30,17 +30,17 @@ step "s1-begin-on-worker"

step "s1-copy"
{
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV');
}

step "s1-commit-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}


@@ -48,7 +48,7 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

// We do not need to begin a transaction on coordinator, since it will be open on workers.

@@ -65,22 +65,22 @@ step "s2-begin-on-worker"

step "s2-copy"
{
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 8, 80 && echo 9, 90''WITH CSV');
}

step "s2-coordinator-drop"
{
    DROP TABLE copy_table;
}

step "s2-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM copy_table WHERE id=5 FOR UPDATE');
}

step "s2-coordinator-create-index-concurrently"
{
    CREATE INDEX CONCURRENTLY copy_table_index ON copy_table(id);
}

step "s2-commit-worker"

@@ -95,7 +95,7 @@ step "s2-stop-connection"

step "s2-commit"
{
    COMMIT;
}
// We use this as a way to wait for s2-ddl-create-index-concurrently to
// complete. We know it can complete after s1-commit has succeeded, this way

@@ -107,7 +107,7 @@ session "s3"

step "s3-select-count"
{
    SELECT COUNT(*) FROM copy_table;
}


@@ -1,7 +1,7 @@
setup
{
    SELECT 1 FROM master_add_node('localhost', 57636, 0);
    CREATE TABLE citus_local_table_1(a int);
    CREATE TABLE citus_local_table_2(a int unique);

    CREATE SCHEMA another_schema;

@@ -10,7 +10,7 @@ setup

teardown
{
    DROP TABLE IF EXISTS citus_local_table_1, citus_local_table_2 CASCADE;
    DROP SCHEMA IF EXISTS another_schema CASCADE;
    -- remove coordinator only if it is added to pg_dist_node
    SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE nodeport=57636;


@@ -7,54 +7,54 @@

setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE observations_with_pk (
        tenant_id text not null,
        dummy int,
        measurement_id bigserial not null,
        payload jsonb not null,
        observation_time timestamptz not null default '03/11/2018 02:00:00'::TIMESTAMP,
        PRIMARY KEY (tenant_id, measurement_id)
    );

    CREATE TABLE observations_with_full_replica_identity (
        tenant_id text not null,
        dummy int,
        measurement_id bigserial not null,
        payload jsonb not null,
        observation_time timestamptz not null default '03/11/2018 02:00:00'::TIMESTAMP
    );
    ALTER TABLE observations_with_full_replica_identity REPLICA IDENTITY FULL;
}

teardown
{
    DROP TABLE observations_with_pk;
    DROP TABLE observations_with_full_replica_identity;
}

session "s1"

step "s1-alter-table"
{
    ALTER TABLE observations_with_pk DROP COLUMN dummy;
    ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
}

step "s1-set-factor-1"
{
    SET citus.shard_replication_factor TO 1;
    SELECT citus_set_coordinator_host('localhost');
}

step "s1-create-distributed-table-observations_with_pk-concurrently"
{
    SELECT create_distributed_table_concurrently('observations_with_pk','tenant_id');
}

step "s1-create-distributed-table-observations-2-concurrently"
{
    SELECT create_distributed_table_concurrently('observations_with_full_replica_identity','tenant_id');
}
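
// Why two very similar tables, presumably: create_distributed_table_concurrently
// replays concurrent changes through logical replication, and replicating
// UPDATE/DELETE needs a replica identity. observations_with_pk relies on its
// primary key for that, while observations_with_full_replica_identity uses
// REPLICA IDENTITY FULL (set in the setup above), so both code paths get covered.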

session "s2"

@@ -66,81 +66,81 @@ step "s2-begin"

step "s2-insert-observations_with_pk"
{
    INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
    INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
    INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
    INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
}

step "s2-insert-observations_with_full_replica_identity"
{
    INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
    INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
    INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
}

step "s2-update-observations_with_pk"
{
    UPDATE observations_with_pk set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
}

step "s2-update-primary-key-observations_with_pk"
{
    UPDATE observations_with_pk set measurement_id=100 where tenant_id = 'tenant_id' and measurement_id = 4 ;
}

step "s2-update-observations_with_full_replica_identity"
{
    UPDATE observations_with_full_replica_identity set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
}

step "s2-delete-observations_with_pk"
{
    DELETE FROM observations_with_pk where tenant_id = 'tenant_id' and measurement_id = 3 ;
}

step "s2-delete-observations_with_full_replica_identity"
{
    DELETE FROM observations_with_full_replica_identity where tenant_id = 'tenant_id' and measurement_id = 3 ;
}

step "s2-end"
{
    COMMIT;
}

step "s2-print-cluster-1"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('observations_with_pk', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    SELECT *
    FROM
        observations_with_pk
    ORDER BY
        measurement_id;
}

step "s2-print-cluster-2"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    SELECT *
    FROM
        observations_with_full_replica_identity
    ORDER BY
        measurement_id;
}


@@ -1,11 +1,11 @@
setup
{
    CREATE TABLE table_to_distribute(id int);
}

teardown
{
    DROP TABLE table_to_distribute CASCADE;
}

session "s1"

@@ -17,12 +17,12 @@ step "s1-begin"

step "s1-create_distributed_table"
{
    SELECT create_distributed_table('table_to_distribute', 'id');
}

step "s1-copy_to_local_table"
{
    COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8';
}

step "s1-commit"

@@ -34,22 +34,22 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-create_distributed_table"
{
    SELECT create_distributed_table('table_to_distribute', 'id');
}

step "s2-copy_to_local_table"
{
    COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8';
}

step "s2-commit"
{
    COMMIT;
}

// concurrent create_distributed_table on empty table


@@ -1,140 +1,140 @@
setup
{
    select setval('pg_dist_shardid_seq', GREATEST(1400292, nextval('pg_dist_shardid_seq')-1));
    -- make sure coordinator is in metadata
    SELECT citus_set_coordinator_host('localhost', 57636);
    CREATE TABLE table_1(id int PRIMARY KEY);
    CREATE TABLE table_2(id smallint PRIMARY KEY);
    CREATE TABLE table_default_colocated(id int PRIMARY KEY);
    CREATE TABLE table_none_colocated(id int PRIMARY KEY);
}

teardown
{
    DROP TABLE table_1 CASCADE;
    DROP TABLE table_2 CASCADE;
    DROP TABLE table_default_colocated CASCADE;
    DROP TABLE table_none_colocated CASCADE;
    SELECT citus_remove_node('localhost', 57636);
}

session "s1"

step "s1-create-concurrently-table_1"
{
    SELECT create_distributed_table_concurrently('table_1', 'id');
}

step "s1-create-concurrently-table_2"
{
    SELECT create_distributed_table_concurrently('table_2', 'id');
}

step "s1-create-concurrently-table_default_colocated"
{
    SELECT create_distributed_table_concurrently('table_default_colocated', 'id');
}

step "s1-create-concurrently-table_none_colocated"
{
    SELECT create_distributed_table_concurrently('table_none_colocated', 'id', colocate_with => 'none');
}

step "s1-settings"
{
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
}
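
// The comment above likely refers to how the isolation tester manages
// connections: setup runs on its own connection, while each session keeps a
// separate one, so a GUC such as citus.shard_replication_factor set in setup
// would not carry over to the session that later creates the tables; hence a
// dedicated settings step per session, e.g.:
//   permutation "s2-settings" "s2-insert" "s1-create-concurrently-table_1"
// (hypothetical ordering, shown only to illustrate where the settings step fits).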

step "s1-truncate"
{
    TRUNCATE table_1;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-settings"
{
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
}

step "s2-insert"
{
    INSERT INTO table_1 SELECT s FROM generate_series(1,20) s;
}

step "s2-update"
{
    UPDATE table_1 SET id = 21 WHERE id = 20;
}

step "s2-delete"
{
    DELETE FROM table_1 WHERE id = 11;
}

step "s2-copy"
{
    COPY table_1 FROM PROGRAM 'echo 30 && echo 31 && echo 32 && echo 33 && echo 34 && echo 35 && echo 36 && echo 37 && echo 38';
}

step "s2-reindex"
{
    REINDEX TABLE table_1;
}

step "s2-reindex-concurrently"
{
    REINDEX TABLE CONCURRENTLY table_1;
}

step "s2-create-concurrently-table_1"
{
    SELECT create_distributed_table_concurrently('table_1', 'id');
}

step "s2-create-table_1"
{
    SELECT create_distributed_table('table_1', 'id');
}

step "s2-create-concurrently-table_2"
{
    SELECT create_distributed_table_concurrently('table_2', 'id');
}

step "s2-create-table_2"
{
    SELECT create_distributed_table('table_2', 'id');
}

step "s2-create-table_2-none"
{
    SELECT create_distributed_table('table_2', 'id', colocate_with => 'none');
}

step "s2-print-status"
{
    -- sanity check on partitions
    SELECT * FROM pg_dist_shard
        WHERE logicalrelid = 'table_1'::regclass OR logicalrelid = 'table_2'::regclass
        ORDER BY shardminvalue::BIGINT, logicalrelid;

    -- sanity check on total elements in the table
    SELECT COUNT(*) FROM table_1;
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

@@ -156,21 +156,21 @@ session "s4"

step "s4-print-waiting-locks"
{
    SELECT mode, relation::regclass, granted FROM pg_locks
        WHERE relation = 'table_1'::regclass OR relation = 'table_2'::regclass
        ORDER BY mode, relation, granted;
}

step "s4-print-waiting-advisory-locks"
{
    SELECT mode, classid, objid, objsubid, granted FROM pg_locks
        WHERE locktype = 'advisory' AND classid = 0 AND objid = 3 AND objsubid = 9
        ORDER BY granted;
}

step "s4-print-colocations"
{
    SELECT shardcount, replicationfactor, distributioncolumntype, distributioncolumncollation FROM pg_dist_colocation ORDER BY colocationid;
}

// show concurrent insert is NOT blocked by create_distributed_table_concurrently


@@ -1,142 +1,142 @@
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE restore_table (test_id integer NOT NULL, data text);
    CREATE TABLE restore_ref_table (test_id integer NOT NULL, data text);
    SELECT create_distributed_table('restore_table', 'test_id');
    SELECT create_reference_table('restore_ref_table');
}

teardown
{
    DROP TABLE IF EXISTS restore_table, restore_ref_table, test_create_distributed_table, test_create_reference_table;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-create-reference"
{
    CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text);
    SELECT create_reference_table('test_create_reference_table');
}

step "s1-create-distributed"
{
    CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text);
    SELECT create_distributed_table('test_create_distributed_table', 'test_id');
}

step "s1-insert"
{
    INSERT INTO restore_table VALUES (1,'hello');
}

step "s1-insert-ref"
{
    INSERT INTO restore_ref_table VALUES (1,'hello');
}

step "s1-modify-multiple"
{
    UPDATE restore_table SET data = 'world';
}

step "s1-modify-multiple-ref"
{
    UPDATE restore_ref_table SET data = 'world';
}

step "s1-multi-statement-ref"
{
    BEGIN;
    INSERT INTO restore_ref_table VALUES (1,'hello');
    INSERT INTO restore_ref_table VALUES (2,'hello');
    COMMIT;
}

step "s1-multi-statement"
{
    BEGIN;
    INSERT INTO restore_table VALUES (1,'hello');
    INSERT INTO restore_table VALUES (2,'hello');
    COMMIT;
}

step "s1-ddl-ref"
{
    ALTER TABLE restore_ref_table ADD COLUMN x int;
}

step "s1-ddl"
{
    ALTER TABLE restore_table ADD COLUMN x int;
}

step "s1-copy-ref"
{
    COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV;
}

step "s1-copy"
{
    COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV;
}

step "s1-recover"
{
    SELECT recover_prepared_transactions();
}

step "s1-create-restore"
{
    SELECT 1 FROM citus_create_restore_point('citus-test-2');
}

step "s1-drop"
{
    DROP TABLE restore_table;
}

step "s1-drop-ref"
{
    DROP TABLE restore_ref_table;
}

step "s1-add-node"
{
    SELECT 1 FROM master_add_inactive_node('localhost', 9999);
}

step "s1-remove-node"
{
    SELECT master_remove_node('localhost', 9999);
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-create-restore"
{
    SELECT 1 FROM citus_create_restore_point('citus-test');
}
|
||||
|
||||
step "s2-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
// verify that citus_create_restore_point is blocked by concurrent create_distributed_table
|
||||
|
|
|
@ -1,94 +1,94 @@
setup
{
    SELECT 1 FROM master_add_node('localhost', 57637);
    SELECT * FROM master_get_active_worker_nodes() ORDER BY node_name, node_port;
}

teardown
{
    DROP TABLE IF EXISTS dist_table;

    SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-add-node-2"
{
    SELECT 1 FROM master_add_node('localhost', 57638);
}

step "s1-remove-node-2"
{
    SELECT * FROM master_remove_node('localhost', 57638);
}

step "s1-abort"
{
    ABORT;
}

step "s1-commit"
{
    COMMIT;
}

step "s1-show-placements"
{
    SELECT
        nodename, nodeport
    FROM
        pg_dist_shard_placement JOIN pg_dist_shard USING (shardid)
    WHERE
        logicalrelid = 'dist_table'::regclass
    ORDER BY
        nodename, nodeport;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-create-table-1"
{
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');
}

step "s2-create-table-2"
{
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');
}

step "s2-create-append-table"
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x', 'append');
    SELECT 1 FROM master_create_empty_shard('dist_table');
}

step "s2-select"
{
    SELECT * FROM dist_table;
}

step "s2-commit"
{
    COMMIT;
}

// session 1 adds a node, session 2 creates a distributed table

@ -1,61 +1,61 @@
setup
{
    CREATE TABLE migration_table (test_id integer NOT NULL, data text);
}

teardown
{
    DROP TABLE migration_table;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-begin-serializable"
{
    BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
    SELECT 1;
}

step "s1-create_distributed_table"
{
    SELECT create_distributed_table('migration_table', 'test_id');
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-copy"
{
    COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;
}

step "s2-insert"
{
    INSERT INTO migration_table VALUES (1, 'hello');
}

step "s2-commit"
{
    COMMIT;
}

step "s2-select"
{
    SELECT * FROM migration_table ORDER BY test_id;
}

// verify that local COPY is picked up by create_distributed_table once it commits

@ -5,15 +5,15 @@
// create range distributed table to test behavior of DDL in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE ddl_hash(id integer, data text);
    SELECT create_distributed_table('ddl_hash', 'id');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS ddl_hash CASCADE;
}

// session 1

@ -5,15 +5,15 @@
// create range distributed table to test behavior of DELETE in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE delete_hash(id integer, data text);
    SELECT create_distributed_table('delete_hash', 'id');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS delete_hash CASCADE;
}

// session 1

@ -2,68 +2,68 @@

setup
{
    CREATE TABLE ref_table(id int PRIMARY KEY, value int);
    SELECT create_reference_table('ref_table');

    CREATE TABLE dist_table(id int, value int REFERENCES ref_table(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_distributed_table('dist_table', 'id');

    INSERT INTO ref_table VALUES (1, 10), (2, 20);
    INSERT INTO dist_table VALUES (1, 1), (2, 2);
}

teardown
{
    DROP TABLE ref_table, dist_table;
}

session "s1"

step "s1-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s1-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s1-delete"
{
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1');
}

step "s1-update"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1');
}

step "s1-commit-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

step "s1-rollback-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

session "s2"

step "s2-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57638);
}

step "s2-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s2-insert"

@ -103,17 +103,17 @@ step "s2-select-for-udpate"

step "s2-coordinator-create-index-concurrently"
{
    CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id);
}

step "s2-commit-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

step "s2-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

// We use this as a way to wait for s2-ddl-create-index-concurrently to

@ -126,8 +126,8 @@ session "s3"

step "s3-display"
{
    SELECT * FROM ref_table ORDER BY id, value;
    SELECT * FROM dist_table ORDER BY id, value;
}

@ -1,297 +1,297 @@
setup
{
    CREATE TABLE deadlock_detection_reference (user_id int UNIQUE, some_val int);
    SELECT create_reference_table('deadlock_detection_reference');

    CREATE TABLE deadlock_detection_test (user_id int UNIQUE, some_val int);
    INSERT INTO deadlock_detection_test SELECT i, i FROM generate_series(1,7) i;
    SELECT create_distributed_table('deadlock_detection_test', 'user_id');

    CREATE TABLE local_deadlock_table (user_id int UNIQUE, some_val int);

    CREATE TABLE deadlock_detection_test_rep_2 (user_id int UNIQUE, some_val int);
    SET citus.shard_replication_factor = 2;
    SELECT create_distributed_table('deadlock_detection_test_rep_2', 'user_id');

    INSERT INTO deadlock_detection_test_rep_2 VALUES (1,1);
    INSERT INTO deadlock_detection_test_rep_2 VALUES (2,2);
}

teardown
{
    DROP TABLE deadlock_detection_test;
    DROP TABLE local_deadlock_table;
    DROP TABLE deadlock_detection_test_rep_2;
    DROP TABLE deadlock_detection_reference;
    SET citus.shard_replication_factor = 1;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1;
}

step "s1-update-2"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2;
}

step "s1-update-4"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4;
}

step "s1-update-5"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 5;
}

step "s1-insert-dist-10"
{
    INSERT INTO deadlock_detection_test VALUES (10, 10);
}

step "s1-insert-local-10"
{
    INSERT INTO local_deadlock_table VALUES (10, 10);
}

step "s1-update-1-rep-2"
{
    UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1;
}

step "s1-update-2-rep-2"
{
    UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2;
}

step "s1-insert-ref-10"
{
    INSERT INTO deadlock_detection_reference VALUES (10, 10);
}

step "s1-insert-ref-11"
{
    INSERT INTO deadlock_detection_reference VALUES (11, 11);
}

step "s1-update-2-4"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2 OR user_id = 4;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1;
}

step "s2-update-2"
{
    UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2;
}

step "s2-update-3"
{
    UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3;
}

step "s2-update-4"
{
    UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 4;
}

step "s2-upsert-select-all"
{
    INSERT INTO deadlock_detection_test SELECT * FROM deadlock_detection_test ON CONFLICT(user_id) DO UPDATE SET some_val = deadlock_detection_test.some_val + 5 RETURNING *;
}

step "s2-ddl"
{
    ALTER TABLE deadlock_detection_test ADD COLUMN test_col INT;
}

step "s2-insert-dist-10"
{
    INSERT INTO deadlock_detection_test VALUES (10, 10);
}

step "s2-insert-local-10"
{
    INSERT INTO local_deadlock_table VALUES (10, 10);
}

step "s2-update-1-rep-2"
{
    UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1;
}

step "s2-update-2-rep-2"
{
    UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2;
}

step "s2-insert-ref-10"
{
    INSERT INTO deadlock_detection_reference VALUES (10, 10);
}

step "s2-insert-ref-11"
{
    INSERT INTO deadlock_detection_reference VALUES (11, 11);
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

step "s3-begin"
{
    BEGIN;
}

step "s3-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1;
}

step "s3-update-2"
{
    UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2;
}

step "s3-update-3"
{
    UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3;
}

step "s3-update-4"
{
    UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4;
}

step "s3-commit"
{
    COMMIT;
}

session "s4"

step "s4-begin"
{
    BEGIN;
}

step "s4-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1;
}

step "s4-update-2"
{
    UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 2;
}

step "s4-update-4"
{
    UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4;
}

step "s4-update-5"
{
    UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5;
}

step "s4-update-6"
{
    UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 6;
}

step "s4-random-adv-lock"
{
    SELECT pg_advisory_xact_lock(8765);
}

step "s4-commit"
{
    COMMIT;
}

session "s5"

step "s5-begin"
{
    BEGIN;
}

step "s5-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 1;
}

step "s5-update-5"
{
    UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5;
}

step "s5-update-6"
{
    UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6;
}

step "s5-random-adv-lock"
{
    SELECT pg_advisory_xact_lock(8765);
}

step "s5-commit"
{
    COMMIT;
}

session "s6"

step "s6-begin"
{
    BEGIN;
}

step "s6-update-5"
{
    UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 5;
}

step "s6-update-6"
{
    UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6;
}

step "s6-commit"
{
    COMMIT;
}

// we disable the daemon during the regression tests in order to get consistent results

@ -302,7 +302,7 @@ session "deadlock-checker"
// backend inappropriately
step "deadlock-checker-call"
{
    SELECT check_distributed_deadlocks();
}

// simplest case, loop with two nodes (Reminder: Citus uses 2PC)

@ -2,12 +2,12 @@

setup
{
    SET TIME ZONE 'PST8PDT';
}

teardown
{
    SET TIME ZONE DEFAULT;
}

session "s1"

@ -34,40 +34,40 @@ step "s1-commit"

step "s1-create-table"
{
    -- some tests also use distributed table
    CREATE TABLE distributed_transaction_id_table(some_value int, other_value int);
    SET citus.shard_count TO 4;
    SELECT create_distributed_table('distributed_transaction_id_table', 'some_value');
}

step "s1-insert"
{
    INSERT INTO distributed_transaction_id_table VALUES (1, 1);
}

step "s1-verify-current-xact-is-on-worker"
{
    SELECT
        remote.nodeport,
        remote.result = row(xact.transaction_number)::text AS xact_exists
    FROM
        get_current_transaction_id() as xact,
        run_command_on_workers($$
            SELECT row(transaction_number)
            FROM get_all_active_transactions()
            WHERE transaction_number != 0;
        $$) as remote
    ORDER BY remote.nodeport ASC;
}

step "s1-get-all-transactions"
{
    SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
}

step "s1-drop-table"
{
    DROP TABLE distributed_transaction_id_table;
}

session "s2"

@ -94,7 +94,7 @@ step "s2-commit"

step "s2-get-all-transactions"
{
    SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
}

session "s3"

@ -116,7 +116,7 @@ step "s3-commit"

step "s3-get-all-transactions"
{
    SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
}

// show that we could get all distributed transaction ids from seperate sessions

@ -2,9 +2,9 @@

setup
{
    CREATE TABLE dist_table(id integer, value integer);
    SELECT create_distributed_table('dist_table', 'id');
    COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
}

// Create and use UDF to close the connection opened in the setup step. Also return the cluster

@ -18,7 +18,7 @@ session "s1"

step "s1-begin"
{
    BEGIN;
}

// We do not need to begin a transaction on coordinator, since it will be open on workers.

@ -40,27 +40,27 @@ step "s1-insert"

step "s1-index"
{
    CREATE INDEX dist_table_index ON dist_table (id);
}

step "s1-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
}

step "s1-commit-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

step "s1-commit"
{
    COMMIT;
}

@ -80,17 +80,17 @@ step "s2-begin-on-worker"

step "s2-alter"
{
    ALTER TABLE dist_table DROP value;
}

step "s2-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
}

step "s2-coordinator-create-index-concurrently"
{
    CREATE INDEX CONCURRENTLY dist_table_index_conc ON dist_table(id);
}

step "s2-commit-worker"

@ -115,7 +115,7 @@ session "s3"

step "s3-select-count"
{
    SELECT COUNT(*) FROM dist_table;
}

@ -1,55 +1,55 @@
setup
{
    CREATE TABLE append_table (test_id integer NOT NULL, data text);
    SELECT create_distributed_table('append_table', 'test_id', 'append');

    SELECT 1 FROM (
        SELECT min(master_create_empty_shard('append_table')) FROM generate_series(1,16)
    ) a;
}

teardown
{
    DROP TABLE append_table;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-truncate"
{
    TRUNCATE append_table;
}

step "s1-drop-all-shards"
{
    SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-truncate"
{
    TRUNCATE append_table;
}

step "s2-drop-all-shards"
{
    SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
}

step "s2-select"
{
    SELECT * FROM append_table;
}

permutation "s1-begin" "s1-drop-all-shards" "s2-truncate" "s1-commit"

@ -5,21 +5,21 @@
// create distributed table to test behavior of DROP in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE SCHEMA drop_tests;
    CREATE TABLE drop_tests.drop_hash(id integer, data text);
    SELECT create_distributed_table('drop_tests.drop_hash', 'id');

    CREATE SCHEMA drop_tests_2;
    CREATE TABLE drop_tests_2.drop_hash_2(id integer, data text);
    SELECT create_distributed_table('drop_tests_2.drop_hash_2', 'id');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS drop_tests.drop_hash, drop_tests_2.drop_hash_2 CASCADE;
    DROP SCHEMA IF EXISTS drop_tests, drop_tests_2 CASCADE;
}

// session 1

@ -1,108 +1,108 @@
setup
{
    SELECT 1 FROM master_add_node('localhost', 57638);

    create schema if not exists schema1;
    create schema if not exists schema2;
    CREATE schema if not exists schema3;
}

teardown
{
    SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-add-node-1"
{
    SELECT 1 FROM master_add_node('localhost', 57637);
}

step "s1-remove-node-1"
{
    SELECT 1 FROM master_remove_node('localhost', 57637);
}

step "s1-commit"
{
    COMMIT;
}

step "s1-create-extension-with-schema2"
{
    CREATE extension seg with version "1.3" schema schema2;
}

step "s1-print"
{
    select count(*) from pg_catalog.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-add-node-1"
{
    SELECT 1 FROM master_add_node('localhost', 57637);
}

step "s2-create-extension-version-11"
{
    CREATE extension seg VERSION "1.1";
}

step "s2-alter-extension-version-13"
{
    ALTER extension seg update to "1.3";
}

step "s2-create-extension-with-schema1"
{
    CREATE extension seg with version "1.3" schema schema1;
}

step "s2-create-extension-with-schema2"
{
    CREATE extension seg with version "1.3" schema schema2;
}

step "s2-drop-extension"
{
    drop extension seg;
}

step "s2-alter-extension-update-to-version-12"
{
    ALTER extension seg update to "1.2";
}

step "s2-alter-extension-set-schema3"
{
    alter extension seg set schema schema3;
}

step "s2-commit"
{
    COMMIT;
}

step "s2-remove-node-1"
{
    SELECT 1 FROM master_remove_node('localhost', 57637);
}

// master_//_node vs extension command

@ -1,27 +1,27 @@
setup
{
    SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
    SET citus.shard_replication_factor TO 1;

    CREATE TABLE test_table(column1 int, column2 int);
    SELECT create_distributed_table('test_table', 'column1');

    CREATE USER test_user_1;

    CREATE USER test_user_2;

    CREATE USER test_readonly;

    CREATE USER test_monitor;

    GRANT pg_monitor TO test_monitor;
}

teardown
{
    DROP TABLE test_table;
    DROP USER test_user_1, test_user_2, test_readonly, test_monitor;
    DROP TABLE IF EXISTS selected_pid;
}

session "s1"

@ -29,96 +29,96 @@ session "s1"
// run_command_on_placements is done in a separate step because the setup is executed as a single transaction
step "s1-grant"
{
    GRANT ALL ON test_table TO test_user_1;
    GRANT ALL ON test_table TO test_user_2;
}

step "s1-begin-insert"
{
    BEGIN;
    SET ROLE test_user_1;
    INSERT INTO test_table VALUES (100, 100);
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin-insert"
{
    BEGIN;
    SET ROLE test_user_2;
    INSERT INTO test_table VALUES (200, 200);
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

step "s3-as-admin"
{
    -- Admin should be able to see all transactions
    SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
    SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
}

step "s3-as-user-1"
{
    -- Even though we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert)
    -- userId (e.g., PG_PROC->userId) does not change, and hence none of the
    -- transactions show up because here we are using test_user_1. This is a
    -- limitation of isolation tester, we should be able to re-connect with
    -- test_user_1 on s1/2-begin-insert to show that test_user_1 sees only its own processes
    SET ROLE test_user_1;
    SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
    SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
}

step "s3-as-readonly"
{
    -- Even though we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert)
    -- userId (e.g., PG_PROC->userId) does not change, and hence none of the
    -- transactions show up because here we are using test_readonly. This is a
    -- limitation of isolation tester, we should be able to re-connect with
    -- test_readonly on s1/2-begin-insert to show that test_readonly sees only
    -- its own processes
    SET ROLE test_readonly;
    SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
    SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
}

step "s3-as-monitor"
{
    -- Monitor should see all transactions
    SET ROLE test_monitor;
    SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
    SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
}

step "s3-show-activity"
{
    SET ROLE postgres;
    select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid);
}

session "s4"

step "s4-record-pid"
{
    SELECT pg_backend_pid() INTO selected_pid;
}

session "s5"

step "s5-kill"
{
    SELECT pg_terminate_backend(pg_backend_pid) FROM selected_pid;
}

@ -1,82 +1,82 @@
#include "isolation_mx_common.include.spec"

setup {
    SELECT citus_add_node('localhost', 57636, groupid:=0);
    CREATE TABLE ref_table(user_id int, value_1 int);
    SELECT create_reference_table('ref_table');
    INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71);

    CREATE TABLE tt1(user_id int, value_1 int);
    SELECT create_distributed_table('tt1', 'user_id');
    INSERT INTO tt1 VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71);
}

// Create and use UDF to close the connection opened in the setup step. Also return the cluster
// back to the initial state.
teardown
{
    DROP TABLE ref_table;
    DROP TABLE tt1;
    SELECT citus_remove_node('localhost', 57636);
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-update-ref-table-from-coordinator"
{
    UPDATE ref_table SET value_1 = 15;
}

// We do not need to begin a transaction on coordinator, since it will be open on workers.

step "s1-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s1-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s1-update-dist-table"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4');
}

step "s1-update-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
}

step "s1-delete-from-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1');
}

step "s1-insert-into-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)');
}

step "s1-copy-to-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV');
}

step "s1-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE');
}

step "s1-update-dist-table-id-1"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1');
}

step "s1-commit-worker"

@ -86,69 +86,69 @@ step "s1-commit-worker"

step "s1-alter-table"
{
    ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id);
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

step "s1-update-on-the-coordinator"
{
    UPDATE tt1 SET value_1 = 4;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57638);
}

step "s2-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s2-update-dist-table"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5');
}

step "s2-update-dist-table-id-1"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1');
}

step "s2-update-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
}

step "s2-insert-into-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)');
}

step "s2-copy-to-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV');
}

step "s2-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

step "s2-update-on-the-coordinator"
{
    UPDATE tt1 SET value_1 = 4;
}

step "s2-commit-worker"

@ -160,13 +160,13 @@ session "s3"

step "s3-select-distributed-waiting-queries"
{
    SELECT blocked_statement, current_statement_in_blocking_process FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
}

// only works for the coordinator
step "s3-show-actual-gpids"
{
    SELECT global_pid > 0 as gpid_exists, query FROM citus_stat_activity WHERE state = 'active' AND query IN (SELECT blocked_statement FROM citus_lock_waits UNION SELECT current_statement_in_blocking_process FROM citus_lock_waits) ORDER BY 1 DESC;
}

// session s1 and s4 executes the commands on the same worker node

@ -174,22 +174,22 @@ session "s4"

step "s4-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s4-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s4-update-dist-table"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5');
}

step "s4-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}
step "s4-commit-worker"
{

@ -205,48 +205,48 @@ session "s5"

step "s5-begin"
{
    BEGIN;
}

step "s5-alter"
{
    ALTER TABLE tt1 ADD COLUMN new_column INT;
}

step "s5-rollback"
{
    ROLLBACK;
}

session "s6"

step "s6-select"
{
    SELECT user_id FROM tt1 ORDER BY user_id DESC LIMIT 1;
}

session "s7"

step "s7-alter"
{
    ALTER TABLE tt1 ADD COLUMN new_column INT;
}

session "s8"

step "s8-begin"
{
    BEGIN;
}

step "s8-select"
{
    SELECT user_id FROM tt1 ORDER BY user_id DESC LIMIT 1;
}

step "s8-rollback"
{
    ROLLBACK;
}

permutation "s1-begin" "s1-update-ref-table-from-coordinator" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-ref-table" "s3-select-distributed-waiting-queries" "s1-commit" "s2-commit-worker" "s2-stop-connection"

@ -5,15 +5,15 @@
// create append distributed table to test behavior of COPY in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE hash_copy(id integer, data text, int_data int);
    SELECT create_distributed_table('hash_copy', 'id');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS hash_copy CASCADE;
}

// session 1

@ -26,8 +26,8 @@ step "s1-router-select" { SELECT * FROM hash_copy WHERE id = 1; }
step "s1-real-time-select" { SELECT * FROM hash_copy ORDER BY 1, 2; }
step "s1-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s1-insert" { INSERT INTO hash_copy VALUES(0, 'k', 0); }
step "s1-insert-select" { INSERT INTO hash_copy SELECT * FROM hash_copy; }

@ -51,10 +51,10 @@ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM
step "s1-commit" { COMMIT; }
step "s1-recreate-with-replication-2"
{
    DROP TABLE hash_copy;
    SET citus.shard_replication_factor TO 2;
    CREATE TABLE hash_copy(id integer, data text, int_data int);
    SELECT create_distributed_table('hash_copy', 'id');
}

// session 2

@ -64,8 +64,8 @@ step "s2-router-select" { SELECT * FROM hash_copy WHERE id = 1; }
step "s2-real-time-select" { SELECT * FROM hash_copy ORDER BY 1, 2; }
step "s2-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s2-insert" { INSERT INTO hash_copy VALUES(0, 'k', 0); }
step "s2-insert-select" { INSERT INTO hash_copy SELECT * FROM hash_copy; }

@ -1,158 +1,158 @@
setup
{
    CREATE TABLE target_table(col_1 int primary key, col_2 int);
    SELECT create_distributed_table('target_table','col_1');
    INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);

    CREATE TABLE source_table(col_1 int, col_2 int, col_3 int);
    SELECT create_distributed_table('source_table','col_1');
    INSERT INTO source_table VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);

    SET citus.shard_replication_factor to 2;
    CREATE TABLE target_table_2(col_1 int primary key, col_2 int, col_3 int);
    SELECT create_distributed_table('target_table_2', 'col_1');
}

teardown
{
    DROP TABLE target_table, target_table_2, source_table;
}

session "s1"

step "s1-begin"
{
    SET citus.shard_replication_factor to 1;
    BEGIN;
}

step "s1-begin-replication-factor-2"
{
    SET citus.shard_replication_factor to 2;
    BEGIN;
}

step "s1-insert-into-select-conflict-update"
{
    INSERT INTO target_table
    SELECT
        col_1, col_2
    FROM (
        SELECT
            col_1, col_2, col_3
        FROM
            source_table
        LIMIT 5
    ) as foo
    ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
}

step "s1-insert-into-select-conflict-do-nothing"
{
    INSERT INTO target_table
    SELECT
        col_1, col_2
    FROM (
        SELECT
            col_1, col_2, col_3
        FROM
            source_table
        LIMIT 5
    ) as foo
    ON CONFLICT DO NOTHING;
}

step "s1-commit"
{
    COMMIT;
}

step "s1-insert-into-select-conflict-update-replication-factor-2"
{
    INSERT INTO target_table_2
    SELECT
        col_1, col_2
    FROM (
        SELECT
            col_1, col_2, col_3
        FROM
            source_table
        LIMIT 5
    ) as foo
    ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
}

session "s2"

step "s2-begin"
{
    BEGIN;
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-begin-replication-factor-2"
|
||||
{
|
||||
SET citus.shard_replication_factor to 2;
|
||||
BEGIN;
|
||||
SET citus.shard_replication_factor to 2;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-insert-into-select-conflict-update"
|
||||
{
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
}
|
||||
|
||||
step "s2-insert-into-select-conflict-update-replication-factor-2"
|
||||
{
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
}
|
||||
|
||||
step "s2-insert-into-select-conflict-do-nothing"
|
||||
{
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
}
|
||||
|
||||
step "s2-update"
|
||||
{
|
||||
UPDATE target_table SET col_2 = 5;
|
||||
UPDATE target_table SET col_2 = 5;
|
||||
}
|
||||
|
||||
step "s2-delete"
|
||||
{
|
||||
DELETE FROM target_table;
|
||||
DELETE FROM target_table;
|
||||
}
|
||||
|
||||
step "s2-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
permutation "s1-begin" "s1-insert-into-select-conflict-update" "s2-begin" "s2-update" "s1-commit" "s2-commit"
|
||||
|
|
|
@ -1,20 +1,20 @@
|
|||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 4;
|
||||
CREATE TABLE source_table(a int, b int);
|
||||
SELECT create_distributed_table('source_table', 'a');
|
||||
CREATE TABLE source_table(a int, b int);
|
||||
SELECT create_distributed_table('source_table', 'a');
|
||||
SET citus.shard_count TO 3;
|
||||
CREATE TABLE target_table(a int, b int);
|
||||
SELECT create_distributed_table('target_table', 'a');
|
||||
CREATE TABLE target_table(a int, b int);
|
||||
SELECT create_distributed_table('target_table', 'a');
|
||||
|
||||
INSERT INTO source_table SELECT i, i * i FROM generate_series(1, 10) i;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS source_table;
|
||||
DROP TABLE IF EXISTS target_table;
|
||||
DROP TABLE IF EXISTS source_table;
|
||||
DROP TABLE IF EXISTS target_table;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
|
|
@ -5,28 +5,28 @@
|
|||
// create range distributed table to test behavior of INSERT/SELECT in concurrent operations
|
||||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE insert_of_insert_select_hash(id integer, data text);
|
||||
SELECT create_distributed_table('insert_of_insert_select_hash', 'id');
|
||||
CREATE TABLE select_of_insert_select_hash(id integer, data text);
|
||||
SELECT create_distributed_table('select_of_insert_select_hash', 'id');
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE insert_of_insert_select_hash(id integer, data text);
|
||||
SELECT create_distributed_table('insert_of_insert_select_hash', 'id');
|
||||
CREATE TABLE select_of_insert_select_hash(id integer, data text);
|
||||
SELECT create_distributed_table('select_of_insert_select_hash', 'id');
|
||||
}
|
||||
|
||||
// drop distributed table
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE;
|
||||
DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE;
|
||||
DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE;
|
||||
DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE;
|
||||
}
|
||||
|
||||
// session 1
|
||||
session "s1"
|
||||
step "s1-initialize"
|
||||
{
|
||||
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
}
|
||||
step "s1-begin" { BEGIN; }
|
||||
step "s1-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; }
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
|
||||
setup
|
||||
{
|
||||
CREATE TABLE dist_table(id integer, value integer);
|
||||
SELECT create_distributed_table('dist_table', 'id');
|
||||
COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
|
||||
CREATE TABLE dist_table(id integer, value integer);
|
||||
SELECT create_distributed_table('dist_table', 'id');
|
||||
COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
|
||||
}
|
||||
|
||||
// Create and use UDF to close the connection opened in the setup step. Also return the cluster
|
||||
|
@ -30,22 +30,22 @@ step "s1-begin-on-worker"
|
|||
|
||||
step "s1-colocated-insert-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
|
||||
}
|
||||
|
||||
step "s1-insert-select-via-coordinator"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
|
||||
}
|
||||
|
||||
step "s1-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s1-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
|
||||
|
@ -53,7 +53,7 @@ session "s2"
|
|||
|
||||
step "s2-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
// We do not need to begin a transaction on coordinator, since it will be open on workers.
|
||||
|
@ -70,22 +70,22 @@ step "s2-begin-on-worker"
|
|||
|
||||
step "s2-colocated-insert-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
|
||||
}
|
||||
|
||||
step "s2-insert-select-via-coordinator"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
|
||||
}
|
||||
|
||||
step "s2-insert"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (5, 50), (6, 60)')
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (5, 50), (6, 60)')
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table')
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table')
|
||||
}
|
||||
|
||||
step "s2-update"
|
||||
|
@ -95,22 +95,22 @@ step "s2-update"
|
|||
|
||||
step "s2-copy"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV');
|
||||
}
|
||||
|
||||
step "s2-coordinator-drop"
|
||||
{
|
||||
DROP TABLE dist_table;
|
||||
DROP TABLE dist_table;
|
||||
}
|
||||
|
||||
step "s2-select-for-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
}
|
||||
|
||||
step "s2-coordinator-create-index-concurrently"
|
||||
{
|
||||
CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id);
|
||||
CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id);
|
||||
}
|
||||
|
||||
step "s2-commit-worker"
|
||||
|
@ -125,7 +125,7 @@ step "s2-stop-connection"
|
|||
|
||||
step "s2-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
// We use this as a way to wait for s2-ddl-create-index-concurrently to
|
||||
|
@ -138,7 +138,7 @@ session "s3"
|
|||
|
||||
step "s3-select-count"
|
||||
{
|
||||
SELECT COUNT(*) FROM dist_table;
|
||||
SELECT COUNT(*) FROM dist_table;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -5,15 +5,15 @@
|
|||
// create range distributed table to test behavior of INSERT in concurrent operations
|
||||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE insert_hash(id integer, data text);
|
||||
SELECT create_distributed_table('insert_hash', 'id');
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE insert_hash(id integer, data text);
|
||||
SELECT create_distributed_table('insert_hash', 'id');
|
||||
}
|
||||
|
||||
// drop distributed table
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS insert_hash CASCADE;
|
||||
DROP TABLE IF EXISTS insert_hash CASCADE;
|
||||
}
|
||||
|
||||
// session 1
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
|
||||
setup
|
||||
{
|
||||
CREATE TABLE insert_table(id integer, value integer);
|
||||
SELECT create_distributed_table('insert_table', 'id');
|
||||
COPY insert_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
|
||||
CREATE TABLE insert_table(id integer, value integer);
|
||||
SELECT create_distributed_table('insert_table', 'id');
|
||||
COPY insert_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
|
||||
}
|
||||
|
||||
// Create and use UDF to close the connection opened in the setup step. Also return the cluster
|
||||
|
@ -31,22 +31,22 @@ step "s1-begin-on-worker"
|
|||
|
||||
step "s1-insert"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)');
|
||||
}
|
||||
|
||||
step "s1-insert-multi-row"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)');
|
||||
}
|
||||
|
||||
step "s1-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s1-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
|
||||
|
@ -71,47 +71,47 @@ step "s2-insert"
|
|||
|
||||
step "s2-insert-multi-row"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)');
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6');
|
||||
}
|
||||
|
||||
step "s2-insert-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table SELECT * FROM insert_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table SELECT * FROM insert_table');
|
||||
}
|
||||
|
||||
step "s2-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 65 WHERE id = 6');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 65 WHERE id = 6');
|
||||
}
|
||||
|
||||
step "s2-update-multi-row"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 67 WHERE id IN (6, 7)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 67 WHERE id IN (6, 7)');
|
||||
}
|
||||
|
||||
step "s2-copy"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COPY insert_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COPY insert_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV');
|
||||
}
|
||||
|
||||
step "s2-truncate"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('TRUNCATE insert_table');
|
||||
SELECT run_commands_on_session_level_connection_to_node('TRUNCATE insert_table');
|
||||
}
|
||||
|
||||
step "s2-select-for-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6 FOR UPDATE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6 FOR UPDATE');
|
||||
}
|
||||
|
||||
step "s2-coordinator-create-index-concurrently"
|
||||
{
|
||||
CREATE INDEX CONCURRENTLY insert_table_index ON insert_table(id);
|
||||
CREATE INDEX CONCURRENTLY insert_table_index ON insert_table(id);
|
||||
}
|
||||
|
||||
step "s2-commit-worker"
|
||||
|
@ -134,7 +134,7 @@ session "s3"
|
|||
|
||||
step "s3-select-count"
|
||||
{
|
||||
SELECT COUNT(*) FROM insert_table;
|
||||
SELECT COUNT(*) FROM insert_table;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -50,4 +50,3 @@ permutation "s1-begin" "s1-insert" "s2-vacuum-analyze"(*) "s2-wait" "s1-commit"
|
|||
|
||||
// INSERT and VACUUM FULL should block each other.
|
||||
permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit"
|
||||
|
||||
|
|
|
@ -2,17 +2,17 @@
|
|||
// being moved contains columns that don't allow for binary encoding
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 45076800;
|
||||
CREATE TABLE t_nonbinary(id bigserial, nonbinary aclitem);
|
||||
SELECT create_distributed_table('t_nonbinary', 'id');
|
||||
INSERT INTO t_nonbinary (SELECT i, 'user postgres=r/postgres' FROM generate_series(1, 5) i);
|
||||
SET citus.shard_count TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 45076800;
|
||||
CREATE TABLE t_nonbinary(id bigserial, nonbinary aclitem);
|
||||
SELECT create_distributed_table('t_nonbinary', 'id');
|
||||
INSERT INTO t_nonbinary (SELECT i, 'user postgres=r/postgres' FROM generate_series(1, 5) i);
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE t_nonbinary;
|
||||
DROP TABLE t_nonbinary;
|
||||
}
|
||||
|
||||
|
||||
|
@ -20,18 +20,18 @@ session "s1"
|
|||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT citus_move_shard_placement(45076800, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='force_logical');
|
||||
SELECT citus_move_shard_placement(45076800, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='force_logical');
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM t_nonbinary order by id;
|
||||
SELECT * FROM t_nonbinary order by id;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
step "s2-insert"
|
||||
{
|
||||
INSERT INTO t_nonbinary (SELECT i, 'user postgres=r/postgres' FROM generate_series(6, 10) i);
|
||||
INSERT INTO t_nonbinary (SELECT i, 'user postgres=r/postgres' FROM generate_series(6, 10) i);
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
|
|
@ -3,19 +3,19 @@
|
|||
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -23,22 +23,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard;
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard;
|
||||
}
|
||||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -55,7 +55,7 @@ session "s2"
|
|||
|
||||
step "s2-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
|
@ -87,22 +87,22 @@ step "s2-upsert"
|
|||
|
||||
step "s2-copy"
|
||||
{
|
||||
COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV;
|
||||
COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV;
|
||||
}
|
||||
|
||||
step "s2-truncate"
|
||||
{
|
||||
TRUNCATE logical_replicate_placement;
|
||||
TRUNCATE logical_replicate_placement;
|
||||
}
|
||||
|
||||
step "s2-alter-table"
|
||||
{
|
||||
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
|
||||
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
|
||||
}
|
||||
|
||||
step "s2-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
@ -149,4 +149,3 @@ permutation "s1-insert" "s1-begin" "s2-begin" "s2-select" "s1-move-placement" "s
|
|||
permutation "s1-begin" "s2-begin" "s2-copy" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
permutation "s1-insert" "s1-begin" "s2-begin" "s2-truncate" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
permutation "s1-begin" "s2-begin" "s2-alter-table" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||
|
||||
|
|
|
@ -5,18 +5,18 @@
|
|||
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -24,22 +24,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard;
|
||||
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -56,12 +56,12 @@ session "s2"
|
|||
|
||||
step "s2-start-session-level-connection"
|
||||
{
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-begin-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
|
@ -86,12 +86,12 @@ step "s2-update"
|
|||
|
||||
step "s2-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
@ -128,4 +128,3 @@ permutation "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker"
|
|||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
|
||||
|
|
|
@ -13,8 +13,8 @@ setup
|
|||
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
SET citus.next_shard_id TO 1234000;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
CREATE TABLE dist(column1 int PRIMARY KEY, column2 int);
|
||||
SELECT create_distributed_table('dist', 'column1');
|
||||
|
@ -22,7 +22,7 @@ setup
|
|||
CREATE USER new_user;
|
||||
GRANT ALL ON SCHEMA public TO new_user;
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('dist', 23) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('dist', 23) INTO selected_shard;
|
||||
GRANT ALL ON TABLE selected_shard TO new_user;
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@ session "s1"
|
|||
|
||||
step "s1-no-connection-cache"
|
||||
{
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
}
|
||||
|
||||
step "s1-table-owner-new_user"
|
||||
|
@ -69,7 +69,7 @@ step "s1-user-spec"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-set-role"
|
||||
|
@ -84,12 +84,12 @@ step "s1-move-placement"
|
|||
|
||||
step "s1-reset-role"
|
||||
{
|
||||
RESET ROLE;
|
||||
RESET ROLE;
|
||||
}
|
||||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
|
@ -106,7 +106,7 @@ session "s2"
|
|||
|
||||
step "s2-no-connection-cache"
|
||||
{
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
}
|
||||
|
||||
step "s2-insert"
|
||||
|
@ -118,7 +118,7 @@ session "s3"
|
|||
|
||||
step "s3-no-connection-cache"
|
||||
{
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
SET citus.max_cached_conns_per_worker to 0;
|
||||
}
|
||||
|
||||
step "s3-acquire-advisory-lock"
|
||||
|
|
|
@ -2,18 +2,18 @@
|
|||
// so setting the corresponding shard here is useful
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -21,22 +21,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
|
||||
}
|
||||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
|
@ -46,7 +46,7 @@ step "s1-insert"
|
|||
|
||||
step "s1-get-shard-distribution"
|
||||
{
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
@ -58,9 +58,9 @@ step "s2-begin"
|
|||
|
||||
step "s2-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement(
|
||||
get_shard_id_for_distribution_column('logical_replicate_placement', 4),
|
||||
'localhost', 57637, 'localhost', 57638);
|
||||
SELECT master_move_shard_placement(
|
||||
get_shard_id_for_distribution_column('logical_replicate_placement', 4),
|
||||
'localhost', 57637, 'localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
|
@ -97,7 +97,7 @@ step "s2-upsert"
|
|||
|
||||
step "s2-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
|
|
@ -4,17 +4,17 @@
|
|||
// so setting the corresponding shard here is useful
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
SET citus.shard_count TO 8;
|
||||
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
|
||||
SELECT create_distributed_table('logical_replicate_placement', 'x');
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
DROP TABLE selected_shard;
|
||||
DROP TABLE logical_replicate_placement;
|
||||
}
|
||||
|
||||
|
||||
|
@ -22,79 +22,79 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement"
|
||||
{
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
SELECT * FROM logical_replicate_placement order by y;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
{
|
||||
INSERT INTO logical_replicate_placement VALUES (15, 15);
|
||||
INSERT INTO logical_replicate_placement VALUES (15, 15);
|
||||
}
|
||||
|
||||
step "s1-get-shard-distribution"
|
||||
{
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-start-session-level-connection"
|
||||
{
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-begin-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y');
|
||||
}
|
||||
|
||||
step "s2-insert"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)');
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)');
|
||||
}
|
||||
|
||||
step "s2-select-for-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE');
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE');
|
||||
}
|
||||
|
||||
step "s2-delete"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15');
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15');
|
||||
}
|
||||
|
||||
step "s2-update"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15');
|
||||
}
|
||||
|
||||
step "s2-commit-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
step "s2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
@ -104,12 +104,12 @@ session "s3"
|
|||
// source code
|
||||
step "s3-acquire-advisory-lock"
|
||||
{
|
||||
SELECT pg_advisory_lock(44000, 55152);
|
||||
SELECT pg_advisory_lock(44000, 55152);
|
||||
}
|
||||
|
||||
step "s3-release-advisory-lock"
|
||||
{
|
||||
SELECT pg_advisory_unlock(44000, 55152);
|
||||
SELECT pg_advisory_unlock(44000, 55152);
|
||||
}
|
||||
|
||||
##// nonblocking tests lie below ###
|
||||
|
@ -132,4 +132,3 @@ permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin
|
|||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection"
|
||||
|
||||
|
|
|
@ -8,25 +8,25 @@
|
|||
// so setting the corresponding shard here is useful
|
||||
setup
|
||||
{
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 8;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
CREATE TABLE logical_replicate_partitioned(x int, y int, PRIMARY KEY (x,y) ) PARTITION BY RANGE(y);
|
||||
SELECT create_distributed_table('logical_replicate_partitioned', 'x');
|
||||
CREATE TABLE logical_replicate_partitioned_1 PARTITION OF logical_replicate_partitioned
|
||||
FOR VALUES FROM (0) TO (100);
|
||||
CREATE TABLE logical_replicate_partitioned_2 PARTITION OF logical_replicate_partitioned
|
||||
FOR VALUES FROM (100) TO (200);
|
||||
CREATE TABLE logical_replicate_partitioned(x int, y int, PRIMARY KEY (x,y) ) PARTITION BY RANGE(y);
|
||||
SELECT create_distributed_table('logical_replicate_partitioned', 'x');
|
||||
CREATE TABLE logical_replicate_partitioned_1 PARTITION OF logical_replicate_partitioned
|
||||
FOR VALUES FROM (0) TO (100);
|
||||
CREATE TABLE logical_replicate_partitioned_2 PARTITION OF logical_replicate_partitioned
|
||||
FOR VALUES FROM (100) TO (200);
|
||||
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned', 5) INTO selected_partitioned_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned_1', 5) INTO selected_single_partition_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned', 5) INTO selected_partitioned_shard;
|
||||
SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned_1', 5) INTO selected_single_partition_shard;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE selected_partitioned_shard;
|
||||
DROP TABLE selected_single_partition_shard;
|
||||
DROP TABLE logical_replicate_partitioned;
|
||||
DROP TABLE selected_partitioned_shard;
|
||||
DROP TABLE selected_single_partition_shard;
|
||||
DROP TABLE logical_replicate_partitioned;
|
||||
}
|
||||
|
||||
|
||||
|
@ -34,22 +34,22 @@ session "s1"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-move-placement-partitioned"
|
||||
{
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637);
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-move-placement-single-partition"
|
||||
{
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637);
|
||||
SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637);
|
||||
}
|
||||
|
||||
step "s1-end"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
@ -78,17 +78,17 @@ step "s2-upsert-partitioned"
|
|||
|
||||
step "s2-copy-partitioned"
|
||||
{
|
||||
COPY logical_replicate_partitioned FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
|
||||
COPY logical_replicate_partitioned FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
|
||||
}
|
||||
|
||||
step "s2-truncate-partitioned"
|
||||
{
|
||||
TRUNCATE logical_replicate_partitioned;
|
||||
TRUNCATE logical_replicate_partitioned;
|
||||
}
|
||||
|
||||
step "s2-alter-table-partitioned"
|
||||
{
|
||||
ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT;
|
||||
ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT;
|
||||
}
|
||||
|
||||
|
||||
|
@ -136,5 +136,3 @@ permutation "s1-begin" "s1-move-placement-single-partition" "s2-truncate-partiti
|
|||
permutation "s1-begin" "s1-move-placement-single-partition" "s2-alter-table-partitioned" "s1-end"
|
||||
permutation "s1-begin" "s2-truncate-partitioned" "s1-move-placement-single-partition" "s1-end"
|
||||
permutation "s1-begin" "s2-alter-table-partitioned" "s1-move-placement-single-partition" "s1-end"
|
||||
|
||||
|
||||
|
|
|
@ -36,16 +36,16 @@ step "s2-begin" { BEGIN; }
|
|||
step "s2-update-node-1" {
|
||||
-- update a specific node by address
|
||||
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
|
||||
FROM pg_dist_node
|
||||
WHERE nodename = 'localhost'
|
||||
AND nodeport = 57637;
|
||||
FROM pg_dist_node
|
||||
WHERE nodename = 'localhost'
|
||||
AND nodeport = 57637;
|
||||
}
|
||||
step "s2-update-node-1-force" {
|
||||
-- update a specific node by address (force)
|
||||
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
|
||||
FROM pg_dist_node
|
||||
WHERE nodename = 'localhost'
|
||||
AND nodeport = 57637;
|
||||
FROM pg_dist_node
|
||||
WHERE nodename = 'localhost'
|
||||
AND nodeport = 57637;
|
||||
}
|
||||
step "s2-abort" { ABORT; }
|
||||
|
||||
|
|
|
@ -1,32 +1,32 @@
|
|||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
CREATE USER my_user;
|
||||
SELECT run_command_on_workers('CREATE USER my_user');
|
||||
CREATE USER my_user;
|
||||
SELECT run_command_on_workers('CREATE USER my_user');
|
||||
|
||||
CREATE TABLE my_table (test_id integer NOT NULL, data text);
|
||||
SELECT create_distributed_table('my_table', 'test_id');
|
||||
|
||||
GRANT USAGE ON SCHEMA public TO my_user;
|
||||
GRANT SELECT ON TABLE my_table TO my_user;
|
||||
GRANT USAGE ON SCHEMA public TO my_user;
|
||||
GRANT SELECT ON TABLE my_table TO my_user;
|
||||
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE FUNCTION make_external_connection_to_node(text,int,text,text)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE FUNCTION make_external_connection_to_node(text,int,text,text)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
||||
SELECT run_command_on_workers('ALTER SYSTEM SET citus.max_client_connections TO 1');
|
||||
SELECT run_command_on_workers('SELECT pg_reload_conf()');
|
||||
SELECT run_command_on_workers('ALTER SYSTEM SET citus.max_client_connections TO 1');
|
||||
SELECT run_command_on_workers('SELECT pg_reload_conf()');
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.max_client_connections');
|
||||
SELECT run_command_on_workers('SELECT pg_reload_conf()');
|
||||
DROP TABLE my_table;
|
||||
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.max_client_connections');
|
||||
SELECT run_command_on_workers('SELECT pg_reload_conf()');
|
||||
DROP TABLE my_table;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
@ -34,13 +34,13 @@ session "s1"
|
|||
// Setup runs as a transaction, so run_command_on_placements must be separate
|
||||
step "s1-grant"
|
||||
{
|
||||
SELECT result FROM run_command_on_placements('my_table', 'GRANT SELECT ON TABLE %s TO my_user');
|
||||
SELECT result FROM run_command_on_placements('my_table', 'GRANT SELECT ON TABLE %s TO my_user');
|
||||
}
|
||||
|
||||
// Open one external connection as non-superuser, is allowed
|
||||
step "s1-connect"
|
||||
{
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
@ -48,13 +48,13 @@ session "s2"
|
|||
// Open another external connection as non-superuser, not allowed
|
||||
step "s2-connect"
|
||||
{
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());
|
||||
}
|
||||
|
||||
// Open another external connection as superuser, allowed
|
||||
step "s2-connect-superuser"
|
||||
{
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'postgres', current_database());
|
||||
SELECT make_external_connection_to_node('localhost', 57637, 'postgres', current_database());
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
@ -62,8 +62,8 @@ session "s3"
|
|||
// Open internal connections as non-superuser, allowed
|
||||
step "s3-select"
|
||||
{
|
||||
SET ROLE my_user;
|
||||
SELECT count(*) FROM my_table;
|
||||
SET ROLE my_user;
|
||||
SELECT count(*) FROM my_table;
|
||||
}
|
||||
|
||||
permutation "s1-grant" "s1-connect" "s2-connect" "s2-connect-superuser" "s3-select"
|
||||
|
|
|
@ -2,42 +2,42 @@
|
|||
|
||||
setup
|
||||
{
|
||||
CREATE OR REPLACE FUNCTION trigger_metadata_sync()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
CREATE OR REPLACE FUNCTION trigger_metadata_sync()
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
|
||||
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
|
||||
RETURNS void
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
|
||||
CREATE TABLE deadlock_detection_test (user_id int UNIQUE, some_val int);
|
||||
INSERT INTO deadlock_detection_test SELECT i, i FROM generate_series(1,7) i;
|
||||
SELECT create_distributed_table('deadlock_detection_test', 'user_id');
|
||||
CREATE TABLE deadlock_detection_test (user_id int UNIQUE, some_val int);
|
||||
INSERT INTO deadlock_detection_test SELECT i, i FROM generate_series(1,7) i;
|
||||
SELECT create_distributed_table('deadlock_detection_test', 'user_id');
|
||||
|
||||
CREATE TABLE t2(a int);
|
||||
SELECT create_distributed_table('t2', 'a');
|
||||
CREATE TABLE t2(a int);
|
||||
SELECT create_distributed_table('t2', 'a');
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP FUNCTION trigger_metadata_sync();
|
||||
DROP TABLE deadlock_detection_test;
|
||||
DROP TABLE t2;
|
||||
SET citus.shard_replication_factor = 1;
|
||||
DROP FUNCTION trigger_metadata_sync();
|
||||
DROP TABLE deadlock_detection_test;
|
||||
DROP TABLE t2;
|
||||
SET citus.shard_replication_factor = 1;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
||||
step "enable-deadlock-detection"
|
||||
{
|
||||
ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO 3;
|
||||
ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO 3;
|
||||
}
|
||||
|
||||
step "disable-deadlock-detection"
|
||||
{
|
||||
ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;
|
||||
ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;
|
||||
}
|
||||
|
||||
step "reload-conf"
|
||||
|
@ -47,49 +47,49 @@ step "reload-conf"
|
|||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-update-1"
|
||||
{
|
||||
UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1;
|
||||
UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1;
|
||||
}
|
||||
|
||||
step "s1-update-2"
|
||||
{
|
||||
UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2;
|
||||
UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-start-session-level-connection"
|
||||
{
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
}
|
||||
|
||||
step "s2-stop-connection"
|
||||
{
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
}
|
||||
|
||||
step "s2-begin-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
}
|
||||
|
||||
step "s2-update-1-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1');
|
||||
}
|
||||
|
||||
step "s2-update-2-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2');
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2');
|
||||
}
|
||||
|
||||
step "s2-truncate-on-worker"
|
||||
|
@ -99,7 +99,7 @@ step "s2-truncate-on-worker"
|
|||
|
||||
step "s2-commit-on-worker"
|
||||
{
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
}
|
||||
|
||||
session "s3"
|
||||
|
@ -111,12 +111,12 @@ step "s3-invalidate-metadata"
|
|||
|
||||
step "s3-resync"
|
||||
{
|
||||
SELECT trigger_metadata_sync();
|
||||
SELECT trigger_metadata_sync();
|
||||
}
|
||||
|
||||
step "s3-wait"
|
||||
{
|
||||
SELECT pg_sleep(2);
|
||||
SELECT pg_sleep(2);
|
||||
}
|
||||
|
||||
// Backends can block metadata sync. The following test verifies that if this happens,
|
||||
|
|
|
@ -1,46 +1,46 @@
|
|||
setup
|
||||
{
|
||||
SET citus.shard_replication_factor to 1;
|
||||
SET citus.shard_replication_factor to 1;
|
||||
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 123000;
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 123000;
|
||||
|
||||
-- Create the necessary test utility function
|
||||
CREATE OR REPLACE FUNCTION activate_node_snapshot()
|
||||
RETURNS text[]
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
SELECT create_distributed_function('activate_node_snapshot()');
|
||||
-- Create the necessary test utility function
|
||||
CREATE OR REPLACE FUNCTION activate_node_snapshot()
|
||||
RETURNS text[]
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
SELECT create_distributed_function('activate_node_snapshot()');
|
||||
|
||||
-- Create distributed tables
|
||||
CREATE TABLE ref_table (test_id integer, y int unique);
|
||||
SELECT create_reference_table('ref_table');
|
||||
-- Create distributed tables
|
||||
CREATE TABLE ref_table (test_id integer, y int unique);
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
|
||||
CREATE TABLE dist_partitioned_table (x int, y int) PARTITION BY RANGE(y);
|
||||
SELECT create_distributed_table('dist_partitioned_table', 'x');
|
||||
CREATE TABLE dist_partitioned_table (x int, y int) PARTITION BY RANGE(y);
|
||||
SELECT create_distributed_table('dist_partitioned_table', 'x');
|
||||
|
||||
CREATE TABLE dist_partitioned_table_p1(x int, y int);
|
||||
CREATE TABLE dist_partitioned_table_p1(x int, y int);
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS ref_table,
|
||||
dist_table,
|
||||
dist_partitioned_table,
|
||||
dist_partitioned_table_p1,
|
||||
dist_partitioned_table_p2,
|
||||
new_dist_table,
|
||||
new_ref_table;
|
||||
DROP TABLE IF EXISTS ref_table,
|
||||
dist_table,
|
||||
dist_partitioned_table,
|
||||
dist_partitioned_table_p1,
|
||||
dist_partitioned_table_p2,
|
||||
new_dist_table,
|
||||
new_ref_table;
|
||||
|
||||
|
||||
DROP FUNCTION activate_node_snapshot();
|
||||
DROP FUNCTION IF EXISTS squares(int);
|
||||
DROP TYPE IF EXISTS my_type;
|
||||
DROP FUNCTION activate_node_snapshot();
|
||||
DROP FUNCTION IF EXISTS squares(int);
|
||||
DROP TYPE IF EXISTS my_type;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
@ -57,154 +57,154 @@ step "s1-commit"

step "s1-start-metadata-sync"
{
    SELECT start_metadata_sync_to_node('localhost', 57638);
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-commit"
{
    COMMIT;
}

step "s2-start-metadata-sync-to-same-node"
{
    SELECT start_metadata_sync_to_node('localhost', 57638);
}

step "s2-start-metadata-sync-to-another-node"
{
    SELECT start_metadata_sync_to_node('localhost', 57637);
}

step "s2-alter-table"
{
    ALTER TABLE dist_table ADD COLUMN z int;
}

step "s2-add-fk"
{
    ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y);
}

step "s2-drop-fk"
{
    ALTER TABLE dist_table DROP CONSTRAINT y_fk;
}

step "s2-drop-table"
{
    DROP TABLE dist_table;
}

step "s2-create-dist-table"
{
    CREATE TABLE new_dist_table(id int, data int);
    SELECT create_distributed_table('new_dist_table', 'id');
}

step "s2-create-schema"
{
    CREATE SCHEMA dist_schema;
    CREATE TABLE dist_schema.dist_table_in_schema(id int, data int);

    SELECT create_distributed_table('dist_schema.dist_table_in_schema', 'id');
}

step "s2-drop-schema"
{
    DROP SCHEMA dist_schema CASCADE;
}

step "s2-create-ref-table"
{
    CREATE TABLE new_ref_table(id int, data int);
    SELECT create_reference_table('new_ref_table');
}

step "s2-attach-partition"
{
    ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9);
}

step "s2-detach-partition"
{
    ALTER TABLE dist_partitioned_table DETACH PARTITION dist_partitioned_table_p1;
}

step "s2-create-partition-of"
{
    CREATE TABLE dist_partitioned_table_p2 PARTITION OF dist_partitioned_table FOR VALUES FROM (10) TO (20);
}

step "s2-create-type"
{
    CREATE TYPE my_type AS (a int, b int);
}

step "s2-drop-type"
{
    DROP TYPE my_type;
}

step "s2-alter-type"
{
    ALTER TYPE my_type ADD ATTRIBUTE x int;
}

step "s2-create-dist-func"
{
    CREATE FUNCTION squares(int) RETURNS SETOF RECORD
        AS $$ SELECT i, i * i FROM generate_series(1, $1) i $$
        LANGUAGE SQL;

    SELECT create_distributed_function('squares(int)');
}

step "s2-drop-dist-func"
{
    DROP FUNCTION squares(int);
}

session "s3"

step "s3-compare-snapshot"
{
    SELECT count(*) = 0 AS same_metadata_in_workers
    FROM
    (
        (
            SELECT unnest(activate_node_snapshot())
            EXCEPT
            SELECT unnest(result::text[]) AS unnested_result
            FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
        )
        UNION
        (
            SELECT unnest(result::text[]) AS unnested_result
            FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
            EXCEPT
            SELECT unnest(activate_node_snapshot())
        )
    ) AS foo;
}

step "s3-compare-type-definition"
{
    SELECT run_command_on_workers($$SELECT '(1,1,1)'::my_type$$);
}

step "s3-debug"
{
    SELECT unnest(activate_node_snapshot());

    SELECT unnest(result::text[])
    FROM run_command_on_workers('SELECT activate_node_snapshot()');
}

// before running any updates to metadata, make sure all nodes have same metadata in the cluster
@ -1,34 +1,34 @@

setup
{
    SET citus.shard_replication_factor to 2;
    CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);
    SELECT create_distributed_table('users_test_table', 'user_id');
    INSERT INTO users_test_table VALUES
        (1, 5, 6, 7),
        (2, 12, 7, 18),
        (3, 23, 8, 25),
        (4, 42, 9, 23),
        (5, 35, 10, 17),
        (6, 21, 11, 25),
        (7, 27, 12, 18);

    CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int);
    SELECT create_distributed_table('events_test_table', 'user_id');
    INSERT INTO events_test_table VALUES
        (1, 5, 7, 7),
        (3, 11, 78, 18),
        (5, 22, 9, 25),
        (7, 41, 10, 23),
        (1, 20, 12, 25),
        (3, 26, 13, 18),
        (5, 17, 14, 4);
}

teardown
{
    DROP TABLE users_test_table;
    DROP TABLE events_test_table;
    SET citus.shard_replication_factor to 1;
}

session "s1"

@ -45,12 +45,12 @@ step "s1-insert_to_events_test_table"

step "s1-update_events_test_table"
{
    UPDATE users_test_table SET value_1 = 3;
}

step "s1-delete_events_test_table"
{
    DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3;
}

step "s1-commit"

@ -62,7 +62,7 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-modify_with_subquery_v1"

@ -72,7 +72,7 @@ step "s2-modify_with_subquery_v1"

step "s2-commit"
{
    COMMIT;
}

// tests to check locks on subqueries are taken

@ -82,4 +82,3 @@ permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-delete_events

permutation "s1-begin" "s2-begin" "s1-insert_to_events_test_table" "s2-modify_with_subquery_v1" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-update_events_test_table" "s2-modify_with_subquery_v1" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-delete_events_test_table" "s2-modify_with_subquery_v1" "s1-commit" "s2-commit"
@ -2,18 +2,18 @@
// so setting the corresponding shard here is useful
setup
{
    SET citus.shard_count TO 8;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE test_move_placement (x int, y int);
    SELECT create_distributed_table('test_move_placement', 'x');

    SELECT get_shard_id_for_distribution_column('test_move_placement', 5) INTO selected_shard;
}

teardown
{
    DROP TABLE test_move_placement;
    DROP TABLE selected_shard;
}

session "s1"

@ -30,96 +30,96 @@ step "s1-begin"
// hence not all placements are cached
step "s1-load-cache"
{
    TRUNCATE test_move_placement;
}

step "s1-insert"
{
    INSERT INTO test_move_placement VALUES (5, 10);
}

step "s1-update"
{
    UPDATE test_move_placement SET y = 5 WHERE x = 5;
}

step "s1-delete"
{
    DELETE FROM test_move_placement WHERE x = 5;
}

step "s1-select"
{
    SELECT count(*) FROM test_move_placement WHERE x = 5;
}

step "s1-ddl"
{
    CREATE INDEX test_move_placement_index ON test_move_placement(x);
}

step "s1-copy"
{
    COPY test_move_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-move-placement"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical');
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-content"
{
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5')
    WHERE
        shardid IN (SELECT * FROM selected_shard)
    ORDER BY
        nodeport;
}

step "s2-print-index-count"
{
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('test_move_placement', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
        nodeport;
}

step "s2-print-placements"
{
    SELECT
        nodename, nodeport, count(*)
    FROM
        pg_dist_shard_placement
    WHERE
        shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass)
    AND
        shardstate = 1
    GROUP BY
        nodename, nodeport;
}

// move a placement while concurrently performing an update/delete/insert/copy
@ -1,27 +1,27 @@

setup
{
    SET citus.shard_count to 2;
    SET citus.shard_replication_factor to 1;
    SELECT setval('pg_dist_shardid_seq',
        CASE WHEN nextval('pg_dist_shardid_seq') > 1699999 OR nextval('pg_dist_shardid_seq') < 1600000
            THEN 1600000
            ELSE nextval('pg_dist_shardid_seq')-2
        END);

    CREATE TABLE referenced_table (id int PRIMARY KEY, value int);
    SELECT create_reference_table('referenced_table');

    CREATE TABLE referencing_table (id int PRIMARY KEY, value int);
    SELECT create_distributed_table('referencing_table', 'id');

    SELECT get_shard_id_for_distribution_column('referencing_table', 2) INTO selected_shard_for_test_table;
}

teardown
{
    DROP TABLE referencing_table;
    DROP TABLE referenced_table;
    DROP TABLE selected_shard_for_test_table;
}

session "s1"

@ -33,73 +33,73 @@ step "s1-begin"

step "s1-insert-referenced"
{
    INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
}

step "s1-insert-referencing"
{
    INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
}

step "s1-delete"
{
    DELETE FROM referenced_table WHERE id < 5;
}

step "s1-update"
{
    UPDATE referenced_table SET value = 5 WHERE id = 5;
}

step "s1-ddl"
{
    CREATE INDEX referenced_table_index ON referenced_table(id);
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-add-fkey"
{
    ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
}

step "s2-move-placement-blocking"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes');
}

step "s2-move-placement-nonblocking"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637);
}

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('referencing_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    -- rows
    SELECT * FROM referencing_table ORDER BY 1;
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

@ -129,4 +129,3 @@ permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-

permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-insert-referencing" "s2-begin" "s2-move-placement-nonblocking" "s1-update" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-insert-referencing" "s2-begin" "s2-move-placement-nonblocking" "s1-ddl" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s2-begin" "s2-move-placement-nonblocking" "s1-insert-referencing" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
@ -2,27 +2,27 @@
// so setting the corresponding shard here is useful
setup
{
    SET citus.shard_count TO 2;
    SET citus.shard_replication_factor TO 1;
    ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102011;
    CREATE TABLE test_move_table (x int, y int);
    SELECT create_distributed_table('test_move_table', 'x');

    SELECT get_shard_id_for_distribution_column('test_move_table', 5) INTO selected_shard_for_test_table;
}

teardown
{
    DROP TABLE test_move_table;
    DROP TABLE selected_shard_for_test_table;

    CREATE OR REPLACE PROCEDURE isolation_cleanup_orphaned_resources()
        LANGUAGE C
        AS 'citus', $$isolation_cleanup_orphaned_resources$$;
    COMMENT ON PROCEDURE isolation_cleanup_orphaned_resources()
        IS 'cleanup orphaned shards';
    RESET citus.enable_metadata_sync;
    CALL isolation_cleanup_orphaned_resources();
}

session "s1"

@ -30,17 +30,17 @@ session "s1"
// with copy all placements are cached
step "s1-load-cache"
{
    COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}

step "s1-move-placement"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
}

step "s1-move-placement-back"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, 'force_logical');
}

step "s1-wait" {}

@ -49,36 +49,36 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-select-from-table"
{
    SELECT * FROM test_move_table WHERE x=5;
}

step "s2-move-placement"
{
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-placements"
{
    SELECT
        nodename, nodeport, count(*)
    FROM
        pg_dist_shard_placement
    WHERE
        shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
    AND
        shardstate = 1
    GROUP BY
        nodename, nodeport;
}

// two concurrent shard moves on the same shard
@ -1,37 +1,37 @@

setup
{
    SET citus.shard_replication_factor to 1;
    SET citus.shard_count to 32;
    SET citus.multi_shard_modify_mode to 'parallel';

    CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);
    SELECT create_distributed_table('users_test_table', 'user_id');
    INSERT INTO users_test_table VALUES
        (1, 5, 6, 7),
        (2, 12, 7, 18),
        (3, 23, 8, 25),
        (4, 42, 9, 23),
        (5, 35, 10, 17),
        (6, 21, 11, 25),
        (7, 27, 12, 18);

    CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int);
    SELECT create_distributed_table('events_test_table', 'user_id');
    INSERT INTO events_test_table VALUES
        (1, 5, 7, 7),
        (3, 11, 78, 18),
        (5, 22, 9, 25),
        (7, 41, 10, 23),
        (1, 20, 12, 25),
        (3, 26, 13, 18),
        (5, 17, 14, 4);
}

teardown
{
    DROP TABLE users_test_table;
    DROP TABLE events_test_table;
    SET citus.shard_count to 4;
}

session "s1"

@ -48,24 +48,24 @@ step "s1-change_connection_mode_to_sequential"

step "s1-update_all_value_1"
{
    UPDATE users_test_table SET value_1 = 3;
}

step "s1-update_even_concurrently"
{
    SET citus.enable_deadlock_prevention TO off;
    UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 0;
    SET citus.enable_deadlock_prevention TO on;
}

step "s1-update_value_1_of_1_or_3_to_5"
{
    UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3;
}

step "s1-update_value_1_of_2_or_4_to_5"
{
    UPDATE users_test_table SET value_1 = 5 WHERE user_id = 2 or user_id = 4;
}

step "s1-commit"

@ -77,7 +77,7 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-change_connection_mode_to_sequential"

@ -87,44 +87,44 @@ step "s2-change_connection_mode_to_sequential"

step "s2-select"
{
    SELECT * FROM users_test_table ORDER BY value_2, value_3;
}

step "s2-insert-to-table"
{
    INSERT INTO users_test_table VALUES (1,2,3,4);
}

step "s2-insert-into-select"
{
    INSERT INTO users_test_table SELECT * FROM events_test_table;
}

step "s2-update_all_value_1"
{
    UPDATE users_test_table SET value_1 = 6;
}

step "s2-update_odd_concurrently"
{
    SET citus.enable_deadlock_prevention = off;
    UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 1;
    SET citus.enable_deadlock_prevention TO on;
}

step "s2-update_value_1_of_1_or_3_to_8"
{
    UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3;
}

step "s2-update_value_1_of_4_or_6_to_4"
{
    UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6;
}

step "s2-commit"
{
    COMMIT;
}

// test with parallel connections
@ -1,26 +1,26 @@

setup
{
    SET citus.max_cached_conns_per_worker to 0;
    SET citus.shard_replication_factor TO 1;

    CREATE USER test_user_1;

    CREATE USER test_user_2;

    GRANT CREATE ON SCHEMA public TO test_user_1, test_user_2;

    CREATE TABLE test_table(column1 int, column2 int);
    ALTER TABLE test_table OWNER TO test_user_1;
    SELECT create_distributed_table('test_table', 'column1');
}

teardown
{
    BEGIN;
    DROP TABLE IF EXISTS test_table;
    REVOKE CREATE ON SCHEMA public FROM test_user_1, test_user_2;
    DROP USER test_user_1, test_user_2;
    COMMIT;
}

session "s1"

@ -31,49 +31,49 @@ session "s1"
// By setting the cached connections to zero we prevent the use of cached conncetions.
// These steps can be removed once the root cause is solved
step "s1-no-connection-cache" {
    SET citus.max_cached_conns_per_worker to 0;
}

step "s1-grant"
{
    SET ROLE test_user_1;
    GRANT ALL ON test_table TO test_user_2;
}

step "s1-begin"
{
    BEGIN;
    SET ROLE test_user_1;
}

step "s1-index"
{
    CREATE INDEX test_index ON test_table(column1);
}

step "s1-reindex"
{
    REINDEX TABLE test_table;
}

step "s1-drop-index"
{
    DROP INDEX IF EXISTS test_index;
}

step "s1-insert"
{
    UPDATE test_table SET column2 = 1;
}

step "s1-truncate"
{
    TRUNCATE test_table;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

@ -84,43 +84,43 @@ session "s2"
// By setting the cached connections to zero we prevent the use of cached conncetions.
// These steps can be removed once the root cause is solved
step "s2-no-connection-cache" {
    SET citus.max_cached_conns_per_worker to 0;
}

step "s2-begin"
{
    BEGIN;
    SET ROLE test_user_2;
}

step "s2-index"
{
    CREATE INDEX test_index ON test_table(column1);
}

step "s2-reindex"
{
    REINDEX TABLE test_table;
}

step "s2-drop-index"
{
    DROP INDEX IF EXISTS test_index;
}

step "s2-insert"
{
    UPDATE test_table SET column2 = 2;
}

step "s2-truncate"
{
    TRUNCATE test_table;
}

step "s2-commit"
{
    COMMIT;
}

// REINDEX

@ -137,4 +137,3 @@ permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-beg

permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit"
permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit"
permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit"
@ -39,5 +39,5 @@ setup
        false)
    FROM pg_dist_node;

    SET citus.shard_replication_factor TO 1;
}
@ -8,23 +8,23 @@
//
setup
{
    SET citus.shard_count to 1;
    SET citus.shard_replication_factor to 1;
    SELECT setval('pg_dist_shardid_seq', 1500000);

    -- Cleanup any orphan shards that might be left over from a previous run.
    CREATE OR REPLACE FUNCTION run_try_drop_marked_resources()
        RETURNS VOID
        AS 'citus'
        LANGUAGE C STRICT VOLATILE;

    CREATE TABLE to_split_table (id int PRIMARY KEY, value int);
    SELECT create_distributed_table('to_split_table', 'id');
}

teardown
{
    SELECT run_try_drop_marked_resources();

    DROP TABLE to_split_table;
}

@ -34,19 +34,19 @@ session "s1"

step "s1-begin"
{
    BEGIN;
}

// cache all placements
step "s1-load-cache"
{
    -- Indirect way to load cache.
    TRUNCATE to_split_table;
}

step "s1-lock-to-split-shard"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN; LOCK TABLE to_split_table_1500001 IN ACCESS SHARE MODE;');
}

// this advisory lock with (almost) random values are only used

@ -64,12 +64,12 @@ step "s1-release-split-advisory-lock"

step "s1-run-cleaner"
{
    SELECT run_try_drop_marked_resources();
}

step "s1-start-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s1-stop-connection"

@ -79,21 +79,21 @@ step "s1-stop-connection"

step "s1-show-pg_dist_cleanup"
{
    SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
}

step "s1-non-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[2, 2],
        'force_logical');
}

step "s1-end"
{
    COMMIT;
}

session "s2"

@ -105,54 +105,54 @@ step "s2-begin"

step "s2-insert"
{
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);
}

step "s2-update"
{
    UPDATE to_split_table SET value = 111 WHERE id = 123456789;
}

step "s2-delete"
{
    DELETE FROM to_split_table WHERE id = 123456789;
}

step "s2-select"
{
    SELECT count(*) FROM to_split_table WHERE id = 123456789;
}

step "s2-end"
{
    COMMIT;
}

step "s2-non-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'force_logical');
}

step "s2-print-locks"
{
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[
            'SELECT CONCAT(relation::regclass, ''-'', locktype, ''-'', mode) AS LockInfo FROM pg_locks
             WHERE relation::regclass::text = ''to_split_table_1500001'';'
        ]::text[],
        false);
}

step "s2-show-pg_dist_cleanup"
{
    SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
}

step "s2-show-pg_dist_cleanup-shards"

@ -163,15 +163,15 @@ step "s2-show-pg_dist_cleanup-shards"

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;
}

@ -199,12 +199,12 @@ step "s4-begin"

step "s4-insert"
{
    INSERT INTO to_split_table VALUES (900, 1);
}

step "s4-end"
{
    COMMIT;
}
@ -8,26 +8,26 @@ setup
{
    SELECT setval('pg_dist_shardid_seq', 1500000);
    SET citus.shard_count to 2;
    SET citus.shard_replication_factor to 1;

    CREATE TABLE reference_table (id int PRIMARY KEY, value int);
    SELECT create_reference_table('reference_table');

    CREATE TABLE table_to_split (id int, value int);
    SELECT create_distributed_table('table_to_split', 'id');
}

teardown
{
    -- Cleanup any orphan shards that might be left over from a previous run.
    CREATE OR REPLACE FUNCTION run_try_drop_marked_resources()
        RETURNS VOID
        AS 'citus'
        LANGUAGE C STRICT VOLATILE;
    SELECT run_try_drop_marked_resources();

    DROP TABLE table_to_split CASCADE;
    DROP TABLE reference_table CASCADE;
}

session "s1"

@ -39,75 +39,75 @@ step "s1-begin"

step "s1-insert"
{
    INSERT INTO reference_table VALUES (5, 10);
}

step "s1-update"
{
    UPDATE reference_table SET value = 5 WHERE id = 5;
}

step "s1-delete"
{
    DELETE FROM reference_table WHERE id = 5;
}

step "s1-ddl"
{
    CREATE INDEX reference_table_index ON reference_table(id);
}

step "s1-copy"
{
    COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-non-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'force_logical');
}

step "s2-add-fkey"
{
    ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
}

step "s2-commit"
{
    COMMIT;
}

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('table_to_split', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;

    -- rows of table_to_split
    SELECT id, value FROM table_to_split ORDER BY id, value;

    -- rows of reference table
    SELECT * FROM reference_table;
}

session "s3"
@ -5,25 +5,25 @@
// session s3 - Holds advisory locks
setup
{
    SET citus.shard_count to 1;
    SET citus.shard_replication_factor to 1;
    SELECT setval('pg_dist_shardid_seq', 1500000);

    CREATE TABLE to_split_table (id int NOT NULL, value int);
    CREATE UNIQUE INDEX split_table_index ON to_split_table(id);
    ALTER TABLE to_split_table REPLICA IDENTITY USING INDEX split_table_index;

    SELECT create_distributed_table('to_split_table', 'id');
}

teardown
{
    -- Cleanup any orphan shards that might be left over from a previous run.
    CREATE OR REPLACE FUNCTION run_try_drop_marked_resources()
        RETURNS VOID
        AS 'citus'
        LANGUAGE C STRICT VOLATILE;
    SELECT run_try_drop_marked_resources();

    DROP TABLE to_split_table CASCADE;
}

@ -33,28 +33,28 @@ session "s1"

step "s1-begin"
{
    BEGIN;
}

// cache all placements
step "s1-load-cache"
{
    -- Indirect way to load cache.
    TRUNCATE to_split_table;
}

step "s1-non-blocking-shard-split"
{
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[2, 2],
        'force_logical');
}

step "s1-end"
{
    COMMIT;
}

session "s2"

@ -66,41 +66,41 @@ step "s2-begin"

step "s2-insert"
{
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);
}

step "s2-update"
{
    UPDATE to_split_table SET value = 111 WHERE id = 123456789;
}

step "s2-delete"
{
    DELETE FROM to_split_table WHERE id = 123456789;
}

step "s2-select"
{
    SELECT count(*) FROM to_split_table WHERE id = 123456789;
}

step "s2-end"
{
    COMMIT;
}

step "s2-print-cluster"
{
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;
}
@ -5,18 +5,18 @@
// create append distributed table to test behavior of COPY in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE partitioned_copy(id integer, data text, int_data int) PARTITION BY RANGE (int_data);
    CREATE TABLE partitioned_copy_0_3 PARTITION OF partitioned_copy FOR VALUES FROM (0) TO (3);
    CREATE TABLE partitioned_copy_3_6 PARTITION OF partitioned_copy FOR VALUES FROM (3) TO (6);
    CREATE TABLE partitioned_copy_6_10 PARTITION OF partitioned_copy FOR VALUES FROM (6) TO (10);
    SELECT create_distributed_table('partitioned_copy', 'id');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS partitioned_copy CASCADE;
}

// session 1

@ -29,8 +29,8 @@ step "s1-router-select" { SELECT * FROM partitioned_copy WHERE id = 1; }
step "s1-real-time-select" { SELECT * FROM partitioned_copy ORDER BY 1, 2; }
step "s1-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s1-insert" { INSERT INTO partitioned_copy VALUES(0, 'k', 0); }
step "s1-insert-select" { INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; }

@ -57,8 +57,8 @@ step "s2-router-select" { SELECT * FROM partitioned_copy WHERE id = 1; }
step "s2-real-time-select" { SELECT * FROM partitioned_copy ORDER BY 1, 2; }
step "s2-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s2-insert" { INSERT INTO partitioned_copy VALUES(0, 'k', 0); }
step "s2-insert-select" { INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; }
@ -1,21 +1,21 @@

setup
{
    CREATE FUNCTION run_pg_send_cancellation(int,int)
        RETURNS void
        AS 'citus'
        LANGUAGE C STRICT;

    CREATE FUNCTION get_cancellation_key()
        RETURNS int
        AS 'citus'
        LANGUAGE C STRICT;

    CREATE TABLE cancel_table (pid int, cancel_key int);
}

teardown
{
    DROP TABLE IF EXISTS cancel_table;
}

session "s1"

@ -23,15 +23,15 @@ session "s1"

/* store the PID and cancellation key of session 1 */
step "s1-register"
{
    INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
}

/* lock the table from session 1, will block and get cancelled */
step "s1-lock"
{
    BEGIN;
    LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
    END;
}

session "s2"

@ -39,27 +39,27 @@ session "s2"

/* lock the table from session 2 to block session 1 */
step "s2-lock"
{
    BEGIN;
    LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
}

/* PID mismatch */
step "s2-wrong-cancel-1"
{
    SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
}

/* cancellation key mismatch */
step "s2-wrong-cancel-2"
{
    SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
}

/* cancel the LOCK statement in session 1 */
step "s2-cancel"
{
    SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
    END;
}

permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"
@ -6,50 +6,50 @@
|
|||
|
||||
setup
|
||||
{
|
||||
CREATE FUNCTION create_progress(bigint, bigint)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
CREATE FUNCTION create_progress(bigint, bigint)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
|
||||
CREATE FUNCTION update_progress(bigint, bigint)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
CREATE FUNCTION update_progress(bigint, bigint)
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
|
||||
CREATE FUNCTION finish_progress()
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
CREATE FUNCTION finish_progress()
|
||||
RETURNS void
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
|
||||
CREATE OR REPLACE FUNCTION show_progress(bigint)
|
||||
RETURNS TABLE(step int, progress bigint)
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
CREATE OR REPLACE FUNCTION show_progress(bigint)
|
||||
RETURNS TABLE(step int, progress bigint)
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
|
||||
CREATE FUNCTION sample_operation(command_type bigint, lockid bigint, progress bigint)
|
||||
RETURNS VOID AS $$
|
||||
BEGIN
|
||||
PERFORM create_progress(command_type, 2);
|
||||
PERFORM pg_advisory_xact_lock(lockid);
|
||||
CREATE FUNCTION sample_operation(command_type bigint, lockid bigint, progress bigint)
|
||||
RETURNS VOID AS $$
|
||||
BEGIN
|
||||
PERFORM create_progress(command_type, 2);
|
||||
PERFORM pg_advisory_xact_lock(lockid);
|
||||
|
||||
PERFORM update_progress(0, progress);
|
||||
PERFORM pg_advisory_xact_lock(lockid + 1);
|
||||
PERFORM update_progress(0, progress);
|
||||
PERFORM pg_advisory_xact_lock(lockid + 1);
|
||||
|
||||
PERFORM update_progress(1, progress);
|
||||
PERFORM pg_advisory_xact_lock(lockid + 2);
|
||||
PERFORM update_progress(1, progress);
|
||||
PERFORM pg_advisory_xact_lock(lockid + 2);
|
||||
|
||||
PERFORM finish_progress();
|
||||
END;
|
||||
$$ LANGUAGE 'plpgsql';
|
||||
PERFORM finish_progress();
|
||||
END;
|
||||
$$ LANGUAGE 'plpgsql';
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP FUNCTION IF EXISTS create_progress(bigint, bigint);
|
||||
DROP FUNCTION IF EXISTS update_progress(bigint, bigint);
|
||||
DROP FUNCTION IF EXISTS finish_progress();
|
||||
DROP FUNCTION IF EXISTS show_progress(bigint);
|
||||
DROP FUNCTION IF EXISTS sample_operation(bigint, bigint, bigint);
|
||||
DROP FUNCTION IF EXISTS create_progress(bigint, bigint);
|
||||
DROP FUNCTION IF EXISTS update_progress(bigint, bigint);
|
||||
DROP FUNCTION IF EXISTS finish_progress();
|
||||
DROP FUNCTION IF EXISTS show_progress(bigint);
|
||||
DROP FUNCTION IF EXISTS sample_operation(bigint, bigint, bigint);
|
||||
}
|
||||
|
||||
|
||||
|
@@ -57,7 +57,7 @@ session "s1"

step "s1-start-operation"
{
    SELECT sample_operation(1337, 10, -1);
}

@@ -65,7 +65,7 @@ session "s2"

step "s2-start-operation"
{
    SELECT sample_operation(1337, 20, 2);
}

@@ -73,7 +73,7 @@ session "s3"

step "s3-start-operation"
{
    SELECT sample_operation(3778, 30, 9);
}

@@ -81,44 +81,44 @@ session "lock-orchestrator"

step "take-locks"
{
    -- Locks for steps of sample operation in s1
    SELECT pg_advisory_lock(10);
    SELECT pg_advisory_lock(11);
    SELECT pg_advisory_lock(12);

    -- Locks for steps of sample operation in s2
    SELECT pg_advisory_lock(20);
    SELECT pg_advisory_lock(21);
    SELECT pg_advisory_lock(22);

    -- Locks for steps of sample operation in s3
    SELECT pg_advisory_lock(30);
    SELECT pg_advisory_lock(31);
    SELECT pg_advisory_lock(32);
}

step "release-locks-1"
{
    -- Release the locks of first steps of sample operations
    SELECT pg_advisory_unlock(10);
    SELECT pg_advisory_unlock(20);
    SELECT pg_advisory_unlock(30);
}

step "release-locks-2"
{
    -- Release the locks of second steps of sample operations
    SELECT pg_advisory_unlock(11);
    SELECT pg_advisory_unlock(21);
    SELECT pg_advisory_unlock(31);
}

step "release-locks-3"
{
    -- Release the locks of final steps of sample operations
    SELECT pg_advisory_unlock(12);
    SELECT pg_advisory_unlock(22);
    SELECT pg_advisory_unlock(32);
}

@@ -126,8 +126,8 @@ session "monitor"

step "show-progress"
{
    SELECT show_progress(1337);
    SELECT show_progress(3778);
}

permutation "take-locks" "s1-start-operation" "s2-start-operation" "s3-start-operation" "show-progress" "release-locks-1" "show-progress" "release-locks-2" "show-progress" "release-locks-3"

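The take-locks/release-locks choreography above relies on the fact that a session-level pg_advisory_lock() call on a key blocks until the session currently holding that key releases it. The real sample_operation is a test UDF whose body is not part of this diff, so the function below is only a hypothetical stand-in showing how a step-wise operation could be gated by those locks:

CREATE OR REPLACE FUNCTION gated_steps_sketch(first_lock_id bigint)
RETURNS void AS $fn$
BEGIN
    -- Illustrative sketch only; not the actual sample_operation UDF.
    -- Step 1 waits until "release-locks-1" unlocks first_lock_id on the orchestrator side.
    PERFORM pg_advisory_lock(first_lock_id);
    PERFORM pg_advisory_unlock(first_lock_id);
    -- Step 2 waits for "release-locks-2".
    PERFORM pg_advisory_lock(first_lock_id + 1);
    PERFORM pg_advisory_unlock(first_lock_id + 1);
    -- Step 3 waits for "release-locks-3".
    PERFORM pg_advisory_lock(first_lock_id + 2);
    PERFORM pg_advisory_unlock(first_lock_id + 2);
END;
$fn$ LANGUAGE plpgsql;
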
@@ -5,20 +5,20 @@

// create range distributed table to test behavior of COPY in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 3004005;
    CREATE TABLE range_copy(id integer, data text, int_data int);
    SELECT create_distributed_table('range_copy', 'id', 'range');
    SELECT master_create_empty_shard('range_copy');
    SELECT master_create_empty_shard('range_copy');
    UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
    UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS range_copy CASCADE;
}

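Because the two shard ranges are assigned by hand here, it can help to double-check them. The catalog query below is only an illustration (it is not part of the diff) and shows the bounds the two UPDATE statements above set:

SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'range_copy'::regclass
ORDER BY shardid;
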
// session 1

@@ -31,8 +31,8 @@ step "s1-router-select" { SELECT * FROM range_copy WHERE id = 1; }

step "s1-real-time-select" { SELECT * FROM range_copy ORDER BY 1, 2; }
step "s1-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s1-insert" { INSERT INTO range_copy VALUES(0, 'k', 0); }
step "s1-insert-select" { INSERT INTO range_copy SELECT * FROM range_copy; }

@@ -63,8 +63,8 @@ step "s2-router-select" { SELECT * FROM range_copy WHERE id = 1; }

step "s2-real-time-select" { SELECT * FROM range_copy ORDER BY 1, 2; }
step "s2-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s2-insert" { INSERT INTO range_copy VALUES(0, 'k', 0); }
step "s2-insert-select" { INSERT INTO range_copy SELECT * FROM range_copy; }

@@ -81,13 +81,14 @@ step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column;

step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); }
step "s2-master-modify-multiple-shards" { DELETE FROM range_copy; }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s2-distribute-table"
{
    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 3004005;
    SELECT create_distributed_table('range_copy', 'id', 'range');
    UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
    UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
}
// We use this as a way to wait for s2-ddl-create-index-concurrently to
// complete. We know it can complete after s1-commit has succeeded, this way we
// make sure no other query is run over session s1 before that happens.

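The comment above describes a common isolation-test trick for waiting on a CONCURRENTLY command: schedule one more step over the same session afterwards and use it as a barrier. A purely illustrative sketch of such a step (the step name and the query are assumptions, not taken from this diff) might look like:

step "s1-wait-for-index"
{
    -- Hypothetical barrier step over session s1; any read-only query would do.
    SELECT indexname FROM pg_indexes WHERE tablename = 'range_copy' ORDER BY 1;
}
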
@@ -25,27 +25,27 @@ setup

    LANGUAGE C STRICT VOLATILE
    AS 'citus', $$stop_session_level_connection_to_node$$;

    CREATE OR REPLACE PROCEDURE isolation_cleanup_orphaned_resources()
        LANGUAGE C
        AS 'citus', $$isolation_cleanup_orphaned_resources$$;
    COMMENT ON PROCEDURE isolation_cleanup_orphaned_resources()
        IS 'cleanup orphaned shards';
    RESET citus.enable_metadata_sync;

    CALL isolation_cleanup_orphaned_resources();
    SET citus.next_shard_id to 120000;
    SET citus.shard_count TO 8;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE t1 (x int PRIMARY KEY, y int);
    SELECT create_distributed_table('t1', 'x');

    SELECT get_shard_id_for_distribution_column('t1', 15) INTO selected_shard;
}

teardown
{
    DROP TABLE selected_shard;
    DROP TABLE t1;
}

@@ -82,7 +82,7 @@ step "s2-start-session-level-connection"

step "s2-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

step "s2-lock-table-on-worker"

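Saving the shard id into the selected_shard table lets later steps refer to one specific placement. As an illustration only (the step name, node names, ports, and transfer mode below are assumptions, not part of this diff), a shard-move step could consume it like this:

step "s1-move-placement-sketch"
{
    -- Move the saved shard's placement between two hypothetical worker ports.
    SELECT citus_move_shard_placement((SELECT * FROM selected_shard),
                                      'localhost', 57637,
                                      'localhost', 57638,
                                      'force_logical');
}
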
@@ -1,14 +1,14 @@

setup
{
    ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 8429800;
    CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
    SELECT create_reference_table('ref_table_1');

    CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_reference_table('ref_table_2');

    CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_reference_table('ref_table_3');

    INSERT INTO ref_table_1 VALUES (1, 1), (3, 3), (5, 5);
    INSERT INTO ref_table_2 SELECT * FROM ref_table_1;

@@ -17,14 +17,14 @@ setup

teardown
{
    DROP TABLE ref_table_1, ref_table_2, ref_table_3;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-delete-table-2"

@@ -106,7 +106,7 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-insert-table-1"

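Since ref_table_2 and ref_table_3 both declare ON DELETE CASCADE / ON UPDATE CASCADE foreign keys up the chain, one modification on ref_table_1 fans out across all three reference tables. The statement below is only an illustration of that standard PostgreSQL behaviour against the schema and data created above:

-- With the data loaded above, this removes (1, 1) from ref_table_1; the ON DELETE
-- CASCADE clause then removes the ref_table_2 rows whose value references it, and
-- their removal cascades in turn to any matching ref_table_3 rows.
DELETE FROM ref_table_1 WHERE id = 1;
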
@@ -1,13 +1,13 @@

setup
{
    SET citus.shard_count TO 2;
    SET citus.shard_replication_factor TO 1;

    CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
    SELECT create_reference_table('ref_table_1');

    CREATE TABLE ref_table_2(id int PRIMARY KEY REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_reference_table('ref_table_2');

    CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_distributed_table('dist_table', 'id');

@@ -21,14 +21,14 @@ setup

teardown
{
    DROP TABLE ref_table_1, ref_table_2, dist_table, selected_shard_for_dist_table;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-delete-table-1"

@@ -66,7 +66,7 @@ session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-move-shards"

@@ -3,13 +3,13 @@

setup
{
    CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
    SELECT create_reference_table('ref_table_1');

    CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_reference_table('ref_table_2');

    CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
    SELECT create_reference_table('ref_table_3');

    INSERT INTO ref_table_1 VALUES (1, 1), (3, 3), (5, 5);
    INSERT INTO ref_table_2 SELECT * FROM ref_table_1;

@@ -18,43 +18,43 @@ setup

teardown
{
    DROP TABLE ref_table_1, ref_table_2, ref_table_3;
}

session "s1"

step "s1-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s1-view-locks"
{
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
            SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
                (SELECT mode, count(*) count FROM pg_locks
                 WHERE locktype='advisory' GROUP BY mode ORDER BY 1, 2) t$$]::text[],
        false);
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

session "s2"

step "s2-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57637);
}

step "s2-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s2-insert-table-1"

@@ -109,7 +109,7 @@ step "s2-rollback-worker"

step "s2-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

// Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode.

@@ -2,8 +2,8 @@

setup
{
    CREATE TABLE ref_table(id integer, value integer);
    SELECT create_reference_table('ref_table');
}

// Create and use UDF to close the connection opened in the setup step. Also return the cluster

@@ -29,17 +29,17 @@ step "s1-begin-on-worker"

step "s1-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE');
}

step "s1-commit-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

step "s1-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

@@ -59,42 +59,42 @@ step "s2-begin-on-worker"

step "s2-insert"
{
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 10), (2, 20)');
}

step "s2-select"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2');
}

step "s2-insert-select-ref-table"
{
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table SELECT * FROM ref_table');
}

step "s2-copy"
{
    SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 1, 10 && echo 2, 20''WITH CSV');
}

step "s2-alter"
{
    ALTER TABLE ref_table DROP value;
}

step "s2-truncate"
{
    SELECT run_commands_on_session_level_connection_to_node('TRUNCATE ref_table');
}

step "s2-select-for-update"
{
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE');
}

step "s2-coordinator-create-index-concurrently"
{
    CREATE INDEX CONCURRENTLY ref_table_index ON ref_table(id);
}

step "s2-commit-worker"

@@ -119,7 +119,7 @@ session "s3"

step "s3-select-count"
{
    SELECT COUNT(*) FROM ref_table;
}

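Several of the steps above drive a second, session-level connection to a worker node rather than running SQL on the coordinator. Pulled out of the spec format, the typical lifecycle of such a connection looks like the sketch below; the port and the statement in the middle are assumptions for illustration:

SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 FOR UPDATE');
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
SELECT stop_session_level_connection_to_node();
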
Some files were not shown because too many files have changed in this diff.