diff --git a/.circleci/config.yml b/.circleci/config.yml index f75d809e5..e8710cc08 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,6 +34,12 @@ jobs: - run: name: 'Check if changed' command: git diff --cached --exit-code + - run: + name: 'Normalize test output' + command: ci/normalize_expected.sh + - run: + name: 'Check if changed' + command: git diff --cached --exit-code check-sql-snapshots: docker: - image: 'citus/extbuilder:latest'
diff --git a/ci/normalize_expected.sh b/ci/normalize_expected.sh new file mode 100755 index 000000000..431ff83a6 --- /dev/null +++ b/ci/normalize_expected.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -eu +for f in $(git ls-tree -r HEAD --name-only src/test/regress/expected/*.out); do + sed -Ef src/test/regress/bin/normalize.sed < "$f" > "$f.modified" + mv "$f.modified" "$f" +done
diff --git a/src/test/regress/.gitignore b/src/test/regress/.gitignore index 42f938a46..e5da6ab73 100644 --- a/src/test/regress/.gitignore +++ b/src/test/regress/.gitignore @@ -22,3 +22,7 @@ # python *.pyc + +# output from diff normalization that shouldn't be committed +*.unmodified +*.modified
diff --git a/src/test/regress/README.md b/src/test/regress/README.md new file mode 100644 index 000000000..3ed6f70e7 --- /dev/null +++ b/src/test/regress/README.md @@ -0,0 +1,99 @@
+# How our testing works
+
+We use the test tooling of postgres to run our tests. This tooling is very
+simple but effective. In essence it runs a series of `.sql` scripts, captures
+their output and stores that in `results/$sqlfilename.out`. It then compares the
+actual output to the expected output with a simple `diff` command:
+
+```bash
+diff results/$sqlfilename.out expected/$sqlfilename.out
+```
+
+## Schedules
+
+Which sql scripts to run is defined in a schedule file, e.g. `multi_schedule`,
+`multi_mx_schedule`.
+
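+A schedule file is a plain list of test groups in the format pg_regress
+understands: each `test:` line names one or more test files (without the
+`.sql` suffix), and tests listed on the same line run in parallel against the
+same cluster. The groupings below are only an illustrative sketch, not a copy
+of a real schedule:
+
+```
+# comments start with a hash
+test: with_prepare
+test: add_coordinator coordinator_shouldhaveshards
+```
+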
+## Makefile
+
+In our `Makefile` we have rules to run the different types of test schedules.
+You can run them from the root of the repository like so:
+```bash
+# e.g. the multi_schedule
+make install -j9 && make -C src/test/regress/ check-multi
+```
+
+Take a look at the makefile for a list of all the testing targets.
+
+### Running a specific test
+
+Often you want to run a specific test and don't want to run everything. You can
+use one of the following commands to do so:
+```bash
+# If your test needs almost no setup you can use check-minimal
+make install -j9 && make -C src/test/regress/ check-minimal EXTRA_TESTS='multi_utility_warnings'
+# Often tests need some test data; if you get missing table errors using
+# check-minimal you should try check-base
+make install -j9 && make -C src/test/regress/ check-base EXTRA_TESTS='with_prepare'
+# Sometimes this is still not enough and some other test needs to be run before
+# the test you want to run. You can do so by adding it to EXTRA_TESTS too.
+make install -j9 && make -C src/test/regress/ check-base EXTRA_TESTS='add_coordinator coordinator_shouldhaveshards'
+```
+
+## Normalization
+
+The output of tests is sadly not completely predictable. Still, we want to
+compare the output of different runs and only raise an error when the important
+parts differ. To do this we don't use the regular system `diff` to compare
+files. Instead we use `src/test/regress/bin/diff`, which does the following
+things:
+
+1. Change the `$sqlfilename.out` file by running it through `sed` using the
+   `src/test/regress/bin/normalize.sed` file. This does things like replacing
+   numbers that keep changing across runs with an `XXX` string, e.g. port
+   numbers or transaction numbers.
+2. Back up the original output to `$sqlfilename.out.unmodified` in case it's
+   needed for debugging.
+3. Compare the changed `results` and `expected` files with the system `diff`
+   command.
+
+
+## Updating the expected test output
+
+Sometimes you add a test to an existing file, or the test output changes in a
+way that's not bad (possibly even good if support for more queries is added).
+In those cases you want to update the expected test output.
+The way to do this is simple: you run the test and copy the new `.out` file
+from the `results` directory to the `expected` directory, e.g.:
+
+```bash
+make install -j9 && make -C src/test/regress/ check-minimal EXTRA_TESTS='multi_utility_warnings'
+cp src/test/regress/{results,expected}/multi_utility_warnings.out
+```
+
+
+## Adding a new test file
+
+Adding a new test file is quite simple:
+
+1. Write the SQL file in the `sql` directory
+2. Add it to a schedule file, to make sure it's run in CI
+3. Run the test
+4. Check that the output is as expected
+5. Copy the `.out` file from `results` to `expected`
+
+## Isolation testing
+See [`src/test/regress/spec/README.md`](https://github.com/citusdata/citus/blob/master/src/test/regress/spec/README.md)
+
+## Upgrade testing
+See [`src/test/regress/upgrade/README.md`](https://github.com/citusdata/citus/blob/master/src/test/regress/upgrade/README.md)
+
+## Failure testing
+
+See [`src/test/regress/mitmscripts/README.md`](https://github.com/citusdata/citus/blob/master/src/test/regress/mitmscripts/README.md)
+
+## Perl test setup script
+
+To automatically set up a citus cluster in tests we use our
+`src/test/regress/pg_regress_multi.pl` script. This sets up a citus cluster and
+then starts the standard postgres test tooling. You almost never have to change
+this file.
diff --git a/src/test/regress/bin/diff b/src/test/regress/bin/diff index 0f7c4cc6f..1cbb0d227 100755 --- a/src/test/regress/bin/diff +++ b/src/test/regress/bin/diff @@ -9,6 +9,7 @@ # # Note that src/test/regress/Makefile adds this directory to $PATH so # pg_regress uses this diff tool instead of the system diff tool. +set -eu -o pipefail file1="${@:(-2):1}" file2="${@:(-1):1}" @@ -29,13 +30,17 @@ then DIFF=/usr/bin/diff fi -if test -z "$VANILLATEST" +if test -z "${VANILLATEST:-}" then - sed -Ef $BASEDIR/normalize.sed < $file1 > $file1.modified - sed -Ef $BASEDIR/normalize.sed < $file2 > $file2.modified - $DIFF -w $args $file1.modified $file2.modified + touch "$file1" # when adding a new test the expected file does not exist + sed -Ef $BASEDIR/normalize.sed < $file1 > "$file1.modified" + mv "$file1" "$file1.unmodified" + mv "$file1.modified" "$file1" + sed -Ef $BASEDIR/normalize.sed < "$file2" > "$file2.modified" + mv "$file2" "$file2.unmodified" + mv "$file2.modified" "$file2" + $DIFF -w $args $file1 $file2 exitcode=$?
- rm -f $file1.modified $file2.modified exit $exitcode else exec $DIFF -w $args $file1 $file2 diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index a4a4091f5..2993aa100 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -15,8 +15,7 @@ s/assigned task [0-9]+ to node/assigned task to node/ s/node group [12] (but|does)/node group \1/ # Differing names can have differing table column widths -s/(-+\|)+-+/---/g -s/.*-------------.*/---------------------------------------------------------------------/g +s/^-[+-]{2,}$/---------------------------------------------------------------------/g # In foreign_key_to_reference_table, normalize shard table names, etc in # the generated plan @@ -45,9 +44,6 @@ s/name_len_12345678901234567890123456789012345678_fcd8ab6f_[0-9]+/name_len_12345 # normalize pkey constraints in multi_insert_select.sql s/"(raw_events_second_user_id_value_1_key_|agg_events_user_id_value_1_agg_key_)[0-9]+"/"\1xxxxxxx"/g -# normalize failed task ids -s/ERROR: failed to execute task [0-9]+/ERROR: failed to execute task X/g - # ignore could not consume warnings /WARNING: could not consume data from worker node/d @@ -65,6 +61,9 @@ s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g /^LINE [0-9]+:.*$/d /^ *\^$/d +# Remove trailing whitespace +s/ *$//g + # pg12 changes s/Partitioned table "/Table "/g s/\) TABLESPACE pg_default$/\)/g @@ -76,3 +75,10 @@ s/_id_other_column_ref_fkey/_id_fkey/g # intermediate_results s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g + +# Plan numbers are not very stable, so we normalize those +# subplan numbers are quite stable so we keep those +s/DEBUG: Plan [0-9]+/DEBUG: Plan XXX/g +s/generating subplan [0-9]+\_/generating subplan XXX\_/g +s/read_intermediate_result\('[0-9]+_/read_intermediate_result('XXX_/g +s/Subplan [0-9]+\_/Subplan XXX\_/g diff --git a/src/test/regress/expected/adaptive_executor.out b/src/test/regress/expected/adaptive_executor.out index 9d3ec647f..d8ebc7d95 100644 --- a/src/test/regress/expected/adaptive_executor.out +++ b/src/test/regress/expected/adaptive_executor.out @@ -5,9 +5,9 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 801009000; SELECT create_distributed_table('test','x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test VALUES (1,2); @@ -18,8 +18,8 @@ SET citus.max_adaptive_executor_pool_size TO 2; SET citus.task_executor_type TO 'adaptive'; BEGIN; SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -27,8 +27,8 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$ SELECT count(*) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%' $$); - sum ------ + sum +--------------------------------------------------------------------- 2 (1 row) @@ -37,8 +37,8 @@ END; SET citus.executor_slow_start_interval TO '10ms'; BEGIN; SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -46,8 +46,8 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$ SELECT count(*) FROM pg_stat_activity WHERE pid <> 
pg_backend_pid() AND query LIKE '%8010090%' $$); - sum ------ + sum +--------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/adaptive_executor_repartition.out b/src/test/regress/expected/adaptive_executor_repartition.out index ab40dd4c0..f0f22eaa6 100644 --- a/src/test/regress/expected/adaptive_executor_repartition.out +++ b/src/test/regress/expected/adaptive_executor_repartition.out @@ -5,48 +5,48 @@ SET citus.shard_replication_factor to 1; SET citus.enable_repartition_joins TO true; CREATE TABLE ab(a int, b int); SELECT create_distributed_table('ab', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ab SELECT *,* FROM generate_series(1,10); SELECT COUNT(*) FROM ab k, ab l WHERE k.a = l.b; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT COUNT(*) FROM ab k, ab l, ab m, ab t WHERE k.a = l.b AND k.a = m.b AND t.b = l.a; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) BEGIN; SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -62,21 +62,21 @@ CREATE TABLE single_hash_repartition_first (id int, sum int, avg float); CREATE TABLE single_hash_repartition_second (id int, sum int, avg float); CREATE TABLE ref_table (id int, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- single hash repartition after bcast joins @@ -86,8 +86,8 @@ FROM ref_table r1, single_hash_repartition_second t1, single_hash_repartition_first t2 WHERE r1.id = t1.id AND t2.sum = t1.id; - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 @@ -104,8 +104,8 @@ FROM 
single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3 WHERE t1.id = t2.id AND t1.sum = t3.id; - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 41d89db06..d1e5d7cd8 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -4,8 +4,8 @@ SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset -- adding the same node again should return the existing nodeid SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) @@ -14,9 +14,9 @@ SELECT master_add_node('localhost', 12345, groupid => 0) = :master_nodeid; ERROR: group 0 already has a primary node -- start_metadata_sync_to_node() for coordinator should raise a notice SELECT start_metadata_sync_to_node('localhost', :master_port); -NOTICE: localhost:57636 is the coordinator and already contains metadata, skipping syncing the metadata - start_metadata_sync_to_node ------------------------------ - +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index e5b8eeec9..5b86da4b3 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -39,47 +39,47 @@ create aggregate sum2_strict (int) ( combinefunc = sum2_sfunc_strict ); select create_distributed_function('sum2(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('sum2_strict(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) create table aggdata (id int, key int, val int, valf float8); select create_distributed_table('aggdata', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into aggdata (id, key, val, valf) values (1, 1, 2, 11.2), (2, 1, NULL, 2.1), (3, 2, 2, 3.22), (4, 2, 3, 4.23), (5, 2, 5, 5.25), (6, 3, 4, 63.4), (7, 5, NULL, 75), (8, 6, NULL, NULL), (9, 6, NULL, 96), (10, 7, 8, 1078), (11, 9, 0, 1.19); select key, sum2(val), sum2_strict(val), stddev(valf) from aggdata group by key order by key; - key | sum2 | sum2_strict | stddev ------+------+-------------+------------------ + key | sum2 | sum2_strict | stddev +--------------------------------------------------------------------- 1 | | 4 | 6.43467170879758 2 | 20 | 20 | 1.01500410508201 - 3 | 8 | 8 | - 5 | | | - 6 | | | - 7 | 16 | 16 | - 9 | 0 | 0 | + 3 | 8 | 8 | + 5 | | | + 6 | | | + 7 | 16 | 16 | + 9 | 0 | 0 | (7 rows) -- FILTER supported select key, sum2(val) filter (where valf 
< 5), sum2_strict(val) filter (where valf < 5) from aggdata group by key order by key; - key | sum2 | sum2_strict ------+------+------------- - 1 | | + key | sum2 | sum2_strict +--------------------------------------------------------------------- + 1 | | 2 | 10 | 10 - 3 | 0 | - 5 | 0 | - 6 | 0 | - 7 | 0 | + 3 | 0 | + 5 | 0 | + 6 | 0 | + 7 | 0 | 9 | 0 | 0 (7 rows) @@ -88,17 +88,17 @@ select key, sum2(distinct val), sum2_strict(distinct val) from aggdata group by ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) select id, sum2(distinct val), sum2_strict(distinct val) from aggdata group by id order by id; - id | sum2 | sum2_strict -----+------+------------- + id | sum2 | sum2_strict +--------------------------------------------------------------------- 1 | 4 | 4 - 2 | | + 2 | | 3 | 4 | 4 4 | 6 | 6 5 | 10 | 10 6 | 8 | 8 - 7 | | - 8 | | - 9 | | + 7 | | + 8 | | + 9 | | 10 | 16 | 16 11 | 0 | 0 (11 rows) @@ -108,9 +108,9 @@ select key, sum2(val order by valf), sum2_strict(val order by valf) from aggdata ERROR: unsupported aggregate function sum2 -- Test handling a lack of intermediate results select sum2(val), sum2_strict(val) from aggdata where valf = 0; - sum2 | sum2_strict -------+------------- - 0 | + sum2 | sum2_strict +--------------------------------------------------------------------- + 0 | (1 row) -- test polymorphic aggregates from https://github.com/citusdata/citus/issues/2397 @@ -136,15 +136,15 @@ CREATE AGGREGATE last ( combinefunc = last_agg ); SELECT create_distributed_function('first(anyelement)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('last(anyelement)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT key, first(val ORDER BY id), last(val ORDER BY id) @@ -153,17 +153,17 @@ ERROR: unsupported aggregate function first -- However, GROUP BY on distribution column gets pushed down SELECT id, first(val ORDER BY key), last(val ORDER BY key) FROM aggdata GROUP BY id ORDER BY id; - id | first | last -----+-------+------ + id | first | last +--------------------------------------------------------------------- 1 | 2 | 2 - 2 | | + 2 | | 3 | 2 | 2 4 | 3 | 3 5 | 5 | 5 6 | 4 | 4 - 7 | | - 8 | | - 9 | | + 7 | | + 8 | | + 9 | | 10 | 8 | 8 11 | 0 | 0 (11 rows) @@ -187,16 +187,16 @@ create aggregate sumstring(text) ( ); select sumstring(valf::text) from aggdata where valf is not null; ERROR: function "aggregate_support.sumstring(text)" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx select create_distributed_function('sumstring(text)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select sumstring(valf::text) from aggdata where valf is not null; - sumstring ------------ + sumstring +--------------------------------------------------------------------- 1339.59 (1 row) @@ -213,14 +213,14 @@ create aggregate array_collect_sort(el int) ( initcond = '{}' ); select create_distributed_function('array_collect_sort(int)'); - create_distributed_function ------------------------------ - + create_distributed_function 
+--------------------------------------------------------------------- + (1 row) select array_collect_sort(val) from aggdata; - array_collect_sort -------------------------------------- + array_collect_sort +--------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) @@ -229,8 +229,8 @@ create user notsuper; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. select run_command_on_workers($$create user notsuper$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -241,16 +241,16 @@ select run_command_on_workers($$ grant all on schema aggregate_support to notsuper; grant all on all tables in schema aggregate_support to notsuper; $$); - run_command_on_workers ---------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) set role notsuper; select array_collect_sort(val) from aggdata; - array_collect_sort -------------------------------------- + array_collect_sort +--------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out index 745a4824a..0d31dc733 100644 --- a/src/test/regress/expected/alter_role_propagation.out +++ b/src/test/regress/expected/alter_role_propagation.out @@ -4,8 +4,8 @@ CREATE ROLE alter_role_1 WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -14,31 +14,31 @@ SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$); ALTER ROLE alter_role_1 WITH SUPERUSER NOSUPERUSER; ERROR: conflicting or redundant options -- make sure that we propagate all options accurately -ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; +ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row ---------------------------------------- + row +--------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers -------------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (2 rows) -- make sure that we propagate all options accurately -ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05'; +ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row --------------------------------------- + row +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ------------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") (2 rows) @@ -46,19 +46,19 @@ SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcr -- make sure that non-existent users are handled properly ALTER ROLE alter_role_2 WITH SUPERUSER NOSUPERUSER; ERROR: conflicting or redundant options -ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; +ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB 
CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; ERROR: role "alter_role_2" does not exist -- make sure that CURRENT_USER just works fine ALTER ROLE CURRENT_USER WITH CONNECTION LIMIT 123; SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER; - rolconnlimit --------------- + rolconnlimit +--------------------------------------------------------------------- 123 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,123) (localhost,57638,t,123) (2 rows) @@ -66,14 +66,14 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname -- make sure that SESSION_USER just works fine ALTER ROLE SESSION_USER WITH CONNECTION LIMIT 124; SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER; - rolconnlimit --------------- + rolconnlimit +--------------------------------------------------------------------- 124 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,124) (localhost,57638,t,124) (2 rows) @@ -81,56 +81,56 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname -- now lets test the passwords in more detail ALTER ROLE alter_role_1 WITH PASSWORD NULL; SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) ALTER ROLE alter_role_1 WITH PASSWORD 'test1'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword -------------------------------------- + rolpassword +--------------------------------------------------------------------- md52f9cc8d65e37edcc45c4a489bdfc699d (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ---------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d) (localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d) (2 rows) ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword -------------------------------------- + rolpassword +--------------------------------------------------------------------- md5e17f7818c5ec023fa87bdb97fd3e842e (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ---------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e) (localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e) (2 rows) ALTER ROLE alter_role_1 WITH 
ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword -------------------------------------- + rolpassword +--------------------------------------------------------------------- md59cce240038b7b335c6aa9674a6f13e72 (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ---------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72) (localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72) (2 rows) @@ -140,22 +140,22 @@ CREATE ROLE "alter_role'1" WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) ALTER ROLE "alter_role'1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'; - rolcreaterole ---------------- + rolcreaterole +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) @@ -164,75 +164,75 @@ CREATE ROLE "alter_role""1" WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) ALTER ROLE "alter_role""1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'; - rolcreaterole ---------------- + rolcreaterole +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) -- add node -ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3'; +ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row --------------------------------------------------------------------------- + row +--------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ------------------------------------------------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (2 rows) SELECT master_remove_node('localhost', :worker_1_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4'; +ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row -------------------------------------------------------------------------- + row +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE 
rolname = 'alter_role_1'$$); - run_command_on_workers ------------------------------------------------------------------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (1 row) SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row -------------------------------------------------------------------------- + row +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers ------------------------------------------------------------------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (2 rows) diff --git a/src/test/regress/expected/base_enable_mx.out b/src/test/regress/expected/base_enable_mx.out index d4fe70c3a..403921e22 100644 --- a/src/test/regress/expected/base_enable_mx.out +++ b/src/test/regress/expected/base_enable_mx.out @@ -2,14 +2,14 @@ -- Setup MX data syncing -- SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/bool_agg.out b/src/test/regress/expected/bool_agg.out index a8d4316af..b8c872bf9 100644 --- a/src/test/regress/expected/bool_agg.out +++ b/src/test/regress/expected/bool_agg.out @@ -3,50 +3,50 @@ CREATE SCHEMA bool_agg; SET search_path TO bool_agg; CREATE TABLE bool_test (id int, val int, flag bool, kind int); SELECT create_distributed_table('bool_agg.bool_test','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO bool_test VALUES (1, 1, true, 99), (2, 2, false, 99), (2, 3, true, 88); -- mix of true and false SELECT bool_and(flag), bool_or(flag), every(flag) FROM bool_test; - bool_and | bool_or | every -----------+---------+------- + bool_and | bool_or | every +--------------------------------------------------------------------- f | t | f (1 row) SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY kind ORDER BY 2; - kind | bool_and | bool_or | every -------+----------+---------+------- + kind | bool_and | bool_or | every 
+--------------------------------------------------------------------- 99 | f | t | f 88 | t | t | t (2 rows) -- expressions in aggregate SELECT bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test; - bool_or | bool_and ----------+---------- + bool_or | bool_and +--------------------------------------------------------------------- t | f (1 row) SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP BY kind ORDER BY 3; - kind | bool_or | bool_and -------+---------+---------- + kind | bool_or | bool_and +--------------------------------------------------------------------- 88 | t | f 99 | t | t (2 rows) -- 1 & 3, 1 | 3 SELECT bit_and(val), bit_or(val) FROM bool_test WHERE flag; - bit_and | bit_or ----------+-------- + bit_and | bit_or +--------------------------------------------------------------------- 1 | 3 (1 row) SELECT flag, bit_and(val), bit_or(val) FROM bool_test GROUP BY flag ORDER BY flag; - flag | bit_and | bit_or -------+---------+-------- + flag | bit_and | bit_or +--------------------------------------------------------------------- f | 2 | 2 t | 1 | 3 (2 rows) diff --git a/src/test/regress/expected/ch_bench_having.out b/src/test/regress/expected/ch_bench_having.out index 560948958..e266e37e8 100644 --- a/src/test/regress/expected/ch_bench_having.out +++ b/src/test/regress/expected/ch_bench_having.out @@ -7,9 +7,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) explain (costs false, summary false, timing false) @@ -19,8 +19,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN ------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -29,28 +29,28 @@ order by s_i_id; Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 1_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock - -> Distributed Subplan 1_2 + -> Distributed Subplan XXX_2 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id InitPlan 1 (returns $0) @@ -65,8 +65,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN ------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -75,19 +75,19 @@ order by s_i_id; Group Key: s_i_id Filter: 
((pg_catalog.sum(worker_column_3))::bigint > $0) -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 4_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id -> Seq Scan on stock_1640000 stock @@ -98,27 +98,27 @@ select s_i_id, sum(s_order_cnt) as ordercount from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) InitPlan 1 (returns $0) -> Function Scan on read_intermediate_result intermediate_result -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 6_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id -> Seq Scan on stock_1640000 stock @@ -129,8 +129,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.s_i_id InitPlan 1 (returns $0) @@ -142,7 +142,7 @@ order by s_i_id; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (cost=40.60..42.60 rows=200 width=12) Group Key: s.s_i_id -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) @@ -152,8 +152,8 @@ explain select s_i_id, sum(s_order_cnt) as ordercount from stock s group by s_i_id having (select true); - QUERY PLAN -------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id Filter: $0 @@ -163,7 +163,7 @@ having (select true); Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (cost=40.60..42.60 rows=200 width=12) Group Key: s.s_i_id -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) @@ -175,8 +175,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) INSERT INTO stock SELECT c, c, c FROM 
generate_series(1, 5) as c; @@ -186,8 +186,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -198,8 +198,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -211,8 +211,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -226,8 +226,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -235,8 +235,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -249,8 +249,8 @@ from stock s group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -258,8 +258,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -309,9 +309,9 @@ insert into stock VALUES (32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) select s_i_id, sum(s_order_cnt) as ordercount @@ -327,8 +327,8 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 33 | 1 1 | 1 (2 rows) @@ -348,8 +348,8 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out index 506d2057c..85b109ddc 100644 --- a/src/test/regress/expected/ch_bench_having_mx.out +++ b/src/test/regress/expected/ch_bench_having_mx.out @@ -10,9 +10,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -24,8 +24,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN ------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -34,28 +34,28 @@ order by s_i_id; Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 1_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock - -> Distributed Subplan 1_2 + -> Distributed Subplan XXX_2 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id InitPlan 1 (returns $0) @@ -70,8 +70,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN ------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -80,19 +80,19 @@ order by s_i_id; Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 4_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq 
Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id -> Seq Scan on stock_1640000 stock @@ -103,27 +103,27 @@ select s_i_id, sum(s_order_cnt) as ordercount from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) InitPlan 1 (returns $0) -> Function Scan on read_intermediate_result intermediate_result -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 6_1 + -> Distributed Subplan XXX_1 -> Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on stock_1640000 stock Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: stock.s_i_id -> Seq Scan on stock_1640000 stock @@ -134,8 +134,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.s_i_id InitPlan 1 (returns $0) @@ -147,7 +147,7 @@ order by s_i_id; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (cost=40.60..42.60 rows=200 width=12) Group Key: s.s_i_id -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) @@ -157,8 +157,8 @@ explain select s_i_id, sum(s_order_cnt) as ordercount from stock s group by s_i_id having (select true); - QUERY PLAN -------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id Filter: $0 @@ -168,7 +168,7 @@ having (select true); Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (cost=40.60..42.60 rows=200 width=12) Group Key: s.s_i_id -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) @@ -180,8 +180,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; @@ -191,8 +191,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount 
+--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -203,8 +203,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -216,8 +216,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -231,8 +231,8 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -240,8 +240,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -254,8 +254,8 @@ from stock s group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -263,8 +263,8 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -319,9 +319,9 @@ insert into stock VALUES (32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -339,8 +339,8 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 33 | 1 1 | 1 (2 rows) @@ -360,8 +360,8 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_subquery_repartition.out b/src/test/regress/expected/ch_bench_subquery_repartition.out index f2b518dd8..371829fc1 100644 --- a/src/test/regress/expected/ch_bench_subquery_repartition.out +++ b/src/test/regress/expected/ch_bench_subquery_repartition.out @@ -61,33 +61,33 @@ create table supplier ( PRIMARY KEY ( su_suppkey ) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('item'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('nation'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('supplier'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO order_line SELECT c, c, c, c, c, NULL, c, c, c, 'abc' FROM generate_series(1, 10) as c; @@ -102,8 +102,8 @@ select s_i_id s_i_id in (select i_id from item) AND s_i_id = ol_i_id order by s_i_id; - s_i_id --------- + s_i_id +--------------------------------------------------------------------- 1 2 3 @@ -150,8 +150,8 @@ where su_suppkey in and su_nationkey = n_nationkey and n_name = 'Germany' order by su_name; - su_name | su_address ----------+------------ + su_name | su_address +--------------------------------------------------------------------- (0 rows) -- Fallback to public tables with prefilled data @@ -184,8 +184,8 @@ where s_suppkey in and s_nationkey = n_nationkey and n_name = 'GERMANY' order by s_name; - s_name | s_address ----------------------------+------------------------------------- + s_name | s_address +--------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R (1 row) @@ -205,8 +205,8 @@ where s_suppkey in and s_nationkey = n_nationkey and n_name = 'GERMANY' order by s_name; - s_name | s_address ----------------------------+------------------------------------- + s_name | s_address +--------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R Supplier#000000044 | kERxlLDnlIZJdN66zAPHklyL (2 rows) diff 
--git a/src/test/regress/expected/chbenchmark_all_queries.out b/src/test/regress/expected/chbenchmark_all_queries.out index 45400e8f1..53bab669e 100644 --- a/src/test/regress/expected/chbenchmark_all_queries.out +++ b/src/test/regress/expected/chbenchmark_all_queries.out @@ -145,75 +145,75 @@ CREATE TABLE supplier ( PRIMARY KEY ( su_suppkey ) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('new_order','no_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('oorder','o_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('history','h_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('customer','c_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('district','d_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('warehouse','w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('item'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('region'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('nation'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('supplier'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) TRUNCATE order_line, new_order, stock, oorder, history, customer, district, warehouse, item, region, nation, supplier; -- for easy copy in development @@ -245,8 +245,8 @@ FROM order_line WHERE ol_delivery_d > '2007-01-02 00:00:00.000000' GROUP BY ol_number ORDER BY ol_number; - ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order ------------+---------+------------+------------------------+------------------------+------------- + ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order +--------------------------------------------------------------------- 0 | 0 | 0.00 | 0.00000000000000000000 | 0.00000000000000000000 | 
1 1 | 1 | 1.00 | 1.00000000000000000000 | 1.00000000000000000000 | 1 2 | 2 | 2.00 | 2.0000000000000000 | 2.0000000000000000 | 1 @@ -301,10 +301,10 @@ ORDER BY n_name, su_name, i_id; - su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment -------------+---------------------------+---------------------------+------+----------+------------+-----------------+------------------------------------------------------------------------------------------------------- - 9 | abc | Germany | 3 | Keyboard | def | ghi | jkl - 4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl + su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment +--------------------------------------------------------------------- + 9 | abc | Germany | 3 | Keyboard | def | ghi | jkl + 4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl (2 rows) -- Query 3 @@ -338,8 +338,8 @@ GROUP BY ORDER BY revenue DESC, o_entry_d; - ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d ----------+---------+---------+---------+-------------------------- + ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d +--------------------------------------------------------------------- 10 | 10 | 10 | 10.00 | Fri Oct 17 00:00:00 2008 9 | 9 | 9 | 9.00 | Fri Oct 17 00:00:00 2008 8 | 8 | 8 | 8.00 | Fri Oct 17 00:00:00 2008 @@ -369,8 +369,8 @@ WHERE o_entry_d >= '2007-01-02 00:00:00.000000' AND ol_delivery_d >= o_entry_d) GROUP BY o_ol_cnt ORDER BY o_ol_cnt; - o_ol_cnt | order_count -----------+------------- + o_ol_cnt | order_count +--------------------------------------------------------------------- 1 | 11 (1 row) @@ -406,8 +406,8 @@ WHERE c_id = o_c_id AND o_entry_d >= '2007-01-02 00:00:00.000000' GROUP BY n_name ORDER BY revenue DESC; - n_name | revenue ----------------------------+--------- + n_name | revenue +--------------------------------------------------------------------- Germany | 3.00 The Netherlands | 2.00 (2 rows) @@ -419,8 +419,8 @@ FROM order_line WHERE ol_delivery_d >= '1999-01-01 00:00:00.000000' AND ol_delivery_d < '2020-01-01 00:00:00.000000' AND ol_quantity BETWEEN 1 AND 100000; - revenue ---------- + revenue +--------------------------------------------------------------------- 55.00 (1 row) @@ -462,8 +462,8 @@ ORDER BY su_nationkey, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue --------------+-------------+--------+--------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- 9 | C | 2008 | 3.00 (1 row) @@ -501,8 +501,8 @@ WHERE i_id = s_i_id AND i_id = ol_i_id GROUP BY extract(YEAR FROM o_entry_d) ORDER BY l_year; - l_year | mkt_share ---------+------------------------ + l_year | mkt_share +--------------------------------------------------------------------- 2008 | 0.50000000000000000000 (1 row) @@ -533,8 +533,8 @@ GROUP BY ORDER BY n_name, l_year DESC; - n_name | l_year | sum_profit ----------------------------+--------+------------ + n_name | l_year | sum_profit +--------------------------------------------------------------------- Germany | 2008 | 3.00 The Netherlands | 2008 | 2.00 United States | 2008 | 1.00 @@ -569,19 +569,19 @@ GROUP BY c_phone, n_name ORDER BY revenue DESC; - c_id | c_last | revenue | c_city | c_phone | n_name -------+--------+---------+-----------+------------------+--------------------------- - 10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia - 9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia - 8 | John | 8.00 | Some City | +1 000 0000000 | 
Cambodia - 7 | John | 7.00 | Some City | +1 000 0000000 | Cambodia - 6 | John | 6.00 | Some City | +1 000 0000000 | Cambodia - 5 | John | 5.00 | Some City | +1 000 0000000 | Cambodia - 4 | John | 4.00 | Some City | +1 000 0000000 | Cambodia - 3 | John | 3.00 | Some City | +1 000 0000000 | Cambodia - 2 | John | 2.00 | Some City | +1 000 0000000 | Cambodia - 1 | John | 1.00 | Some City | +1 000 0000000 | Cambodia - 0 | John | 0.00 | Some City | +1 000 0000000 | Cambodia + c_id | c_last | revenue | c_city | c_phone | n_name +--------------------------------------------------------------------- + 10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia + 9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia + 8 | John | 8.00 | Some City | +1 000 0000000 | Cambodia + 7 | John | 7.00 | Some City | +1 000 0000000 | Cambodia + 6 | John | 6.00 | Some City | +1 000 0000000 | Cambodia + 5 | John | 5.00 | Some City | +1 000 0000000 | Cambodia + 4 | John | 4.00 | Some City | +1 000 0000000 | Cambodia + 3 | John | 3.00 | Some City | +1 000 0000000 | Cambodia + 2 | John | 2.00 | Some City | +1 000 0000000 | Cambodia + 1 | John | 1.00 | Some City | +1 000 0000000 | Cambodia + 0 | John | 0.00 | Some City | +1 000 0000000 | Cambodia (11 rows) -- Query 11 @@ -606,8 +606,8 @@ HAVING sum(s_order_cnt) > AND su_nationkey = n_nationkey AND n_name = 'Germany') ORDER BY ordercount DESC; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -626,8 +626,8 @@ WHERE ol_w_id = o_w_id AND ol_delivery_d < '2020-01-01 00:00:00.000000' GROUP BY o_ol_cnt ORDER BY o_ol_cnt; - o_ol_cnt | high_line_count | low_line_count -----------+-----------------+---------------- + o_ol_cnt | high_line_count | low_line_count +--------------------------------------------------------------------- 1 | 2 | 9 (1 row) @@ -649,8 +649,8 @@ GROUP BY c_count ORDER BY custdist DESC, c_count DESC; - c_count | custdist ----------+---------- + c_count | custdist +--------------------------------------------------------------------- 0 | 9 1 | 2 (2 rows) @@ -664,8 +664,8 @@ FROM WHERE ol_i_id = i_id AND ol_delivery_d >= '2007-01-02 00:00:00.000000' AND ol_delivery_d < '2020-01-02 00:00:00.000000'; - promo_revenue ------------------------- + promo_revenue +--------------------------------------------------------------------- 0.00000000000000000000 (1 row) @@ -693,8 +693,8 @@ FROM WHERE su_suppkey = supplier_no AND total_revenue = (SELECT max(total_revenue) FROM revenue) ORDER BY su_suppkey; - su_suppkey | su_name | su_address | su_phone | total_revenue -------------+---------------------------+------------+-----------------+--------------- + su_suppkey | su_name | su_address | su_phone | total_revenue +--------------------------------------------------------------------- 9 | abc | def | ghi | 3.00 (1 row) @@ -718,8 +718,8 @@ GROUP BY substr(i_data, 1, 3), i_price ORDER BY supplier_cnt DESC; - i_name | brand | i_price | supplier_cnt -----------+-------+---------+-------------- + i_name | brand | i_price | supplier_cnt +--------------------------------------------------------------------- Keyboard | co | 50.00 | 3 (1 row) @@ -738,8 +738,8 @@ FROM AND ol_i_id = i_id GROUP BY i_id) t WHERE ol_i_id = t.i_id; - avg_yearly ---------------------- + avg_yearly +--------------------------------------------------------------------- 27.5000000000000000 (1 row) @@ -775,8 +775,8 @@ HAVING sum(ol_amount) > 5 -- was 200, but thats too big for the dataset ORDER BY 
sum(ol_amount) DESC, o_entry_d; - c_last | o_id | o_entry_d | o_ol_cnt | sum ---------+------+--------------------------+----------+------- + c_last | o_id | o_entry_d | o_ol_cnt | sum +--------------------------------------------------------------------- John | 10 | Fri Oct 17 00:00:00 2008 | 1 | 10.00 John | 9 | Fri Oct 17 00:00:00 2008 | 1 | 9.00 John | 8 | Fri Oct 17 00:00:00 2008 | 1 | 8.00 @@ -808,8 +808,8 @@ WHERE ( ol_i_id = i_id AND ol_quantity <= 10 AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1,5,3)); - revenue ---------- + revenue +--------------------------------------------------------------------- 7.00 (1 row) @@ -837,8 +837,8 @@ WHERE su_suppkey in AND su_nationkey = n_nationkey AND n_name = 'Germany' ORDER BY su_name; - su_name | su_address ----------------------------+------------ + su_name | su_address +--------------------------------------------------------------------- abc | def (1 row) @@ -872,8 +872,8 @@ GROUP BY su_name ORDER BY numwait desc, su_name; - su_name | numwait ----------+--------- + su_name | numwait +--------------------------------------------------------------------- (0 rows) -- Query 22 @@ -895,8 +895,8 @@ WHERE substr(c_phone,1,1) in ('1','2','3','4','5','6','7') AND o_d_id = c_d_id) GROUP BY substr(c_state,1,1) ORDER BY substr(c_state,1,1); - country | numcust | totacctbal ----------+---------+------------ + country | numcust | totacctbal +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO WARNING; diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index bc9d0e8eb..06e20c4b3 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -4,30 +4,30 @@ SET search_path TO coordinator_shouldhaveshards; -- idempotently add node to allow this test to run without add_coordinator SET client_min_messages TO WARNING; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) RESET client_min_messages; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SET citus.shard_replication_factor TO 1; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x', colocate_with := 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_placement USING (shardid) WHERE logicalrelid = 'test'::regclass AND groupid = 0; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -36,49 +36,49 @@ INSERT INTO test SELECT s,s FROM generate_series(2,100) s; -- router queries execute locally INSERT INTO test VALUES (1, 1); SELECT y FROM test WHERE x = 1; - y ---- + y +--------------------------------------------------------------------- 1 (1 row) -- multi-shard queries connect to localhost SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 100 (1 row) WITH a AS (SELECT * FROM test) SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 100 (1 row) -- multi-shard queries in transaction blocks execute locally BEGIN; SELECT y FROM test WHERE x = 1; - y ---- + y +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 100 (1 row) END; BEGIN; SELECT y FROM test WHERE x = 1; - y ---- + y +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 100 (1 row) @@ -88,8 +88,8 @@ ALTER TABLE test ADD COLUMN z int; -- DDL after local execution BEGIN; SELECT y FROM test WHERE x = 1; - y ---- + y +--------------------------------------------------------------------- 1 (1 row) @@ -101,8 +101,8 @@ ROLLBACK; BEGIN; ALTER TABLE test DROP COLUMN z; SELECT y FROM test WHERE x = 1; - y ---- + y +--------------------------------------------------------------------- 1 (1 row) @@ -111,8 +111,8 @@ DELETE FROM test; DROP TABLE test; DROP SCHEMA coordinator_shouldhaveshards CASCADE; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/cte_nested_modification.out b/src/test/regress/expected/cte_nested_modification.out index fca88d15a..49f478f01 100644 --- a/src/test/regress/expected/cte_nested_modification.out +++ b/src/test/regress/expected/cte_nested_modification.out @@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int); INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt2(id int, value_1 int); INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt3(id int, json_val json); @@ -40,8 +40,8 @@ SET value_1 = abs(2 + 3.5) FROM cte_1 WHERE cte_1.id = tt1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 2 2 | 6 3 | 4 @@ -64,8 +64,8 @@ WITH cte_1 AS ( UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 9 2 | 9 3 | 9 @@ -88,8 +88,8 @@ WITH cte_1(id) AS ( UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 9 2 | 9 3 | 9 @@ -114,8 +114,8 @@ DELETE FROM tt1 USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 3 | 4 (1 row) @@ -134,8 +134,8 @@ DELETE FROM tt1 USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/cte_prepared_modify.out b/src/test/regress/expected/cte_prepared_modify.out index 390a8d72d..9460ab275 100644 --- a/src/test/regress/expected/cte_prepared_modify.out +++ b/src/test/regress/expected/cte_prepared_modify.out @@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int); INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt2(id int, value_1 int); INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Test with prepared statements (parameter used by SET) diff --git a/src/test/regress/expected/custom_aggregate_support.out b/src/test/regress/expected/custom_aggregate_support.out index ecc7cdc47..606e1e2ab 100644 --- a/src/test/regress/expected/custom_aggregate_support.out +++ b/src/test/regress/expected/custom_aggregate_support.out @@ -14,15 +14,15 @@ SET citus.shard_count TO 4; CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO raw_table @@ -38,8 +38,8 @@ SELECT hll_cardinality(hll_union_agg(agg)) FROM ( SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; - hll_cardinality ------------------ + hll_cardinality +--------------------------------------------------------------------- 19 (1 row) @@ -54,8 +54,8 @@ FROM daily_uniques WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 2 DESC,1 LIMIT 10; - day | hll_cardinality -------------+----------------- + day | hll_cardinality +--------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 06-22-2018 | 19 @@ -72,8 +72,8 @@ LIMIT 10; SELECT hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; - hll_cardinality ------------------ + hll_cardinality +--------------------------------------------------------------------- 19 (1 row) @@ -82,8 +82,8 @@ FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' GROUP BY 1 ORDER BY 1; - month | hll_cardinality --------+----------------- + month | hll_cardinality +--------------------------------------------------------------------- 6 | 19 7 | 13 (2 rows) @@ -108,31 +108,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on 
daily_uniques_xxxxxxx daily_uniques (23 rows) SET hll.force_groupagg to ON; @@ -142,31 +142,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) -- Test disabling hash_agg with operator on coordinator query @@ -177,31 +177,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) SET hll.force_groupagg to ON; @@ -211,31 +211,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: 
host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) -- Test disabling hash_agg with expression on coordinator query @@ -246,31 +246,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) SET hll.force_groupagg to ON; @@ -280,31 +280,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) -- Test disabling hash_agg with having @@ -315,31 +315,31 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression 
+ Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (23 rows) SET hll.force_groupagg to ON; @@ -350,35 +350,35 @@ FROM daily_uniques GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) - -> Seq Scan on daily_uniques_360615 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) - -> Seq Scan on daily_uniques_360616 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) - -> Seq Scan on daily_uniques_360617 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) - -> Seq Scan on daily_uniques_360618 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (27 rows) DROP TABLE raw_table; @@ -395,15 +395,15 @@ WHERE name = 'topn' CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO customer_reviews @@ -421,8 +421,8 @@ FROM ( FROM customer_reviews )a ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 
| 6851 @@ -446,8 +446,8 @@ FROM popular_reviewer WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; - day | item | frequency -------------+------+----------- + day | item | frequency +--------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 06-21-2018 | 1 | 248 @@ -468,8 +468,8 @@ FROM ( WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date )a ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -488,8 +488,8 @@ FROM ( ORDER BY 1 )a ORDER BY 1, 3 DESC, 2; - month | item | frequency --------+------+----------- + month | item | frequency +--------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 6 | 3 | 992 @@ -509,14 +509,10 @@ FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; ERROR: set-valued function called in context that cannot accept a set -LINE 1: SELECT (topn(topn_union_agg(reviewers), 10)).* - ^ SELECT (topn(topn_add_agg(user_id::text), 10)).* FROM customer_reviews ORDER BY 2 DESC, 1; ERROR: set-valued function called in context that cannot accept a set -LINE 1: SELECT (topn(topn_add_agg(user_id::text), 10)).* - ^ -- The following is going to be supported after window function support SELECT day, (topn(agg, 10)).* FROM ( diff --git a/src/test/regress/expected/custom_aggregate_support_0.out b/src/test/regress/expected/custom_aggregate_support_0.out index 6e65bc9a9..c2e322e52 100644 --- a/src/test/regress/expected/custom_aggregate_support_0.out +++ b/src/test/regress/expected/custom_aggregate_support_0.out @@ -9,8 +9,8 @@ AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; - hll_present -------------- + hll_present +--------------------------------------------------------------------- f (1 row) @@ -18,80 +18,62 @@ SET citus.shard_count TO 4; CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); ERROR: type "hll" does not exist -LINE 1: CREATE TABLE daily_uniques(day date, unique_users hll); - ^ SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); ERROR: relation "daily_uniques" does not exist -LINE 1: SELECT create_distributed_table('daily_uniques', 'day'); - ^ -INSERT INTO raw_table - SELECT day, user_id % 19 +INSERT INTO raw_table + SELECT day, user_id % 19 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -INSERT INTO raw_table - SELECT day, user_id % 13 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), +INSERT INTO raw_table + SELECT day, user_id % 13 + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -- Run hll on raw data -SELECT hll_cardinality(hll_union_agg(agg)) +SELECT hll_cardinality(hll_union_agg(agg)) FROM ( - SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg + SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; ERROR: function hll_hash_integer(integer) does not exist -LINE 3: SELECT 
hll_add_agg(hll_hash_integer(user_id)) AS agg - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Aggregate the data into daily_uniques -INSERT INTO daily_uniques - SELECT day, hll_add_agg(hll_hash_integer(user_id)) +INSERT INTO daily_uniques + SELECT day, hll_add_agg(hll_hash_integer(user_id)) FROM raw_table GROUP BY 1; ERROR: relation "daily_uniques" does not exist -LINE 1: INSERT INTO daily_uniques - ^ -- Basic hll_cardinality check on aggregated data -SELECT day, hll_cardinality(unique_users) -FROM daily_uniques -WHERE day >= '2018-06-20' and day <= '2018-06-30' -ORDER BY 2 DESC,1 +SELECT day, hll_cardinality(unique_users) +FROM daily_uniques +WHERE day >= '2018-06-20' and day <= '2018-06-30' +ORDER BY 2 DESC,1 LIMIT 10; ERROR: relation "daily_uniques" does not exist -LINE 2: FROM daily_uniques - ^ -- Union aggregated data for one week -SELECT hll_cardinality(hll_union_agg(unique_users)) -FROM daily_uniques +SELECT hll_cardinality(hll_union_agg(unique_users)) +FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; ERROR: relation "daily_uniques" does not exist -LINE 2: FROM daily_uniques - ^ SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' -GROUP BY 1 +GROUP BY 1 ORDER BY 1; ERROR: relation "daily_uniques" does not exist -LINE 2: FROM daily_uniques - ^ -- These are going to be supported after window function support SELECT day, hll_cardinality(hll_union_agg(unique_users) OVER seven_days) FROM daily_uniques WINDOW seven_days AS (ORDER BY day ASC ROWS 6 PRECEDING); ERROR: relation "daily_uniques" does not exist -LINE 2: FROM daily_uniques - ^ SELECT day, (hll_cardinality(hll_union_agg(unique_users) OVER two_days)) - hll_cardinality(unique_users) AS lost_uniques FROM daily_uniques WINDOW two_days AS (ORDER BY day ASC ROWS 1 PRECEDING); ERROR: relation "daily_uniques" does not exist -LINE 2: FROM daily_uniques - ^ -- Test disabling hash_agg on coordinator query SET citus.explain_all_tasks to true; SET hll.force_groupagg to OFF; @@ -102,8 +84,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ SET hll.force_groupagg to ON; EXPLAIN(COSTS OFF) SELECT @@ -112,8 +92,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ -- Test disabling hash_agg with operator on coordinator query SET hll.force_groupagg to OFF; EXPLAIN(COSTS OFF) @@ -123,8 +101,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ SET hll.force_groupagg to ON; EXPLAIN(COSTS OFF) SELECT @@ -133,8 +109,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ -- Test disabling hash_agg with expression on coordinator query SET hll.force_groupagg to OFF; EXPLAIN(COSTS OFF) @@ -144,8 +118,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ SET hll.force_groupagg to ON; EXPLAIN(COSTS OFF) SELECT @@ -154,8 +126,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ -- Test disabling hash_agg with having SET hll.force_groupagg to OFF; EXPLAIN(COSTS OFF) @@ -165,8 +135,6 @@ FROM daily_uniques GROUP BY(1); ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ SET hll.force_groupagg to ON; 
EXPLAIN(COSTS OFF) SELECT @@ -176,8 +144,6 @@ FROM GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; ERROR: relation "daily_uniques" does not exist -LINE 5: daily_uniques - ^ DROP TABLE raw_table; DROP TABLE daily_uniques; ERROR: table "daily_uniques" does not exist @@ -190,65 +156,59 @@ AS create_topn FROM pg_available_extensions() WHERE name = 'topn' \gset :create_topn; - topn_present --------------- + topn_present +--------------------------------------------------------------------- f (1 row) CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 7, review % 5 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 13, review % 3 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -- Run topn on raw data SELECT (topn(agg, 10)).* FROM ( - SELECT topn_add_agg(user_id::text) AS agg + SELECT topn_add_agg(user_id::text) AS agg FROM customer_reviews )a ORDER BY 2 DESC, 1; ERROR: function topn_add_agg(text) does not exist -LINE 3: SELECT topn_add_agg(user_id::text) AS agg - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Aggregate the data into popular_reviewer -INSERT INTO popular_reviewer +INSERT INTO popular_reviewer SELECT day, topn_add_agg(user_id::text) FROM customer_reviews GROUP BY 1; ERROR: function topn_add_agg(text) does not exist -LINE 2: SELECT day, topn_add_agg(user_id::text) - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Basic topn check on aggregated data -SELECT day, (topn(reviewers, 10)).* -FROM popular_reviewer -WHERE day >= '2018-06-20' and day <= '2018-06-30' +SELECT day, (topn(reviewers, 10)).* +FROM popular_reviewer +WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; ERROR: function topn(jsonb, integer) does not exist -LINE 1: SELECT day, (topn(reviewers, 10)).* - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Union aggregated data for one week -SELECT (topn(agg, 10)).* +SELECT (topn(agg, 10)).* FROM ( SELECT topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -256,10 +216,8 @@ FROM ( )a ORDER BY 2 DESC, 1; ERROR: function topn_union_agg(jsonb) does not exist -LINE 3: SELECT topn_union_agg(reviewers) AS agg - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-SELECT month, (topn(agg, 5)).* +SELECT month, (topn(agg, 5)).* FROM ( SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -269,25 +227,19 @@ FROM ( )a ORDER BY 1, 3 DESC, 2; ERROR: function topn_union_agg(jsonb) does not exist -LINE 3: SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(rev... - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- TODO the following queries will be supported after we fix #2265 -- They work for PG9.6 but not for PG10 -SELECT (topn(topn_union_agg(reviewers), 10)).* +SELECT (topn(topn_union_agg(reviewers), 10)).* FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; ERROR: function topn_union_agg(jsonb) does not exist -LINE 1: SELECT (topn(topn_union_agg(reviewers), 10)).* - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. SELECT (topn(topn_add_agg(user_id::text), 10)).* FROM customer_reviews ORDER BY 2 DESC, 1; ERROR: function topn_add_agg(text) does not exist -LINE 1: SELECT (topn(topn_add_agg(user_id::text), 10)).* - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- The following is going to be supported after window function support SELECT day, (topn(agg, 10)).* @@ -299,8 +251,6 @@ FROM ( ORDER BY 3 DESC, 1, 2 LIMIT 10; ERROR: function topn_union_agg(jsonb) does not exist -LINE 3: SELECT day, topn_union_agg(reviewers) OVER seven_days AS ag... - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. SELECT day, (topn(topn_add_agg(user_id::text) OVER seven_days, 10)).* FROM customer_reviews @@ -308,8 +258,6 @@ WINDOW seven_days AS (ORDER BY day ASC ROWS 6 PRECEDING) ORDER BY 3 DESC, 1, 2 LIMIT 10; ERROR: function topn_add_agg(text) does not exist -LINE 1: SELECT day, (topn(topn_add_agg(user_id::text) OVER seven_day... - ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
DROP TABLE customer_reviews; DROP TABLE popular_reviewer; diff --git a/src/test/regress/expected/custom_aggregate_support_1.out b/src/test/regress/expected/custom_aggregate_support_1.out index d05adad1d..8709a716b 100644 --- a/src/test/regress/expected/custom_aggregate_support_1.out +++ b/src/test/regress/expected/custom_aggregate_support_1.out @@ -14,48 +14,48 @@ SET citus.shard_count TO 4; CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -INSERT INTO raw_table - SELECT day, user_id % 19 +INSERT INTO raw_table + SELECT day, user_id % 19 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -INSERT INTO raw_table - SELECT day, user_id % 13 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), +INSERT INTO raw_table + SELECT day, user_id % 13 + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -- Run hll on raw data -SELECT hll_cardinality(hll_union_agg(agg)) +SELECT hll_cardinality(hll_union_agg(agg)) FROM ( - SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg + SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; - hll_cardinality ------------------ + hll_cardinality +--------------------------------------------------------------------- 19 (1 row) -- Aggregate the data into daily_uniques -INSERT INTO daily_uniques - SELECT day, hll_add_agg(hll_hash_integer(user_id)) +INSERT INTO daily_uniques + SELECT day, hll_add_agg(hll_hash_integer(user_id)) FROM raw_table GROUP BY 1; -- Basic hll_cardinality check on aggregated data -SELECT day, hll_cardinality(unique_users) -FROM daily_uniques -WHERE day >= '2018-06-20' and day <= '2018-06-30' -ORDER BY 2 DESC,1 +SELECT day, hll_cardinality(unique_users) +FROM daily_uniques +WHERE day >= '2018-06-20' and day <= '2018-06-30' +ORDER BY 2 DESC,1 LIMIT 10; - day | hll_cardinality -------------+----------------- + day | hll_cardinality +--------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 06-22-2018 | 19 @@ -69,21 +69,21 @@ LIMIT 10; (10 rows) -- Union aggregated data for one week -SELECT hll_cardinality(hll_union_agg(unique_users)) -FROM daily_uniques +SELECT hll_cardinality(hll_union_agg(unique_users)) +FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; - hll_cardinality ------------------ + hll_cardinality +--------------------------------------------------------------------- 19 (1 row) SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' -GROUP BY 1 +GROUP BY 1 ORDER BY 1; - month | hll_cardinality --------+----------------- + month | hll_cardinality +--------------------------------------------------------------------- 6 | 19 7 | 13 (2 rows) @@ -108,33 +108,33 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN 
------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (25 rows) SET hll.force_groupagg to ON; @@ -144,8 +144,8 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -154,25 +154,25 @@ GROUP BY(1); Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (27 rows) -- Test disabling hash_agg with operator on coordinator query @@ -183,33 +183,33 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> 
Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (25 rows) SET hll.force_groupagg to ON; @@ -219,8 +219,8 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -229,25 +229,25 @@ GROUP BY(1); Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (27 rows) -- Test disabling hash_agg with expression on coordinator query @@ -258,33 +258,33 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (25 rows) SET hll.force_groupagg to ON; @@ -294,8 +294,8 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------------- + QUERY PLAN 
+--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -304,25 +304,25 @@ GROUP BY(1); Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (27 rows) -- Test disabling hash_agg with having @@ -333,33 +333,33 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (25 rows) SET hll.force_groupagg to ON; @@ -370,8 +370,8 @@ FROM daily_uniques GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day Filter: (hll_cardinality(hll_union_agg(remote_scan.worker_column_3)) > '1'::double precision) @@ -381,37 +381,37 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) -> Sort Sort Key: day - -> Seq Scan on daily_uniques_360261 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost 
port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) -> Sort Sort Key: day - -> Seq Scan on daily_uniques_360262 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) -> Sort Sort Key: day - -> Seq Scan on daily_uniques_360263 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: day Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision) -> Sort Sort Key: day - -> Seq Scan on daily_uniques_360264 daily_uniques + -> Seq Scan on daily_uniques_xxxxxxx daily_uniques (40 rows) DROP TABLE raw_table; @@ -428,34 +428,34 @@ WHERE name = 'topn' CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 7, review % 5 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 13, review % 3 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -- Run topn on raw data SELECT (topn(agg, 10)).* FROM ( - SELECT topn_add_agg(user_id::text) AS agg + SELECT topn_add_agg(user_id::text) AS agg FROM customer_reviews )a ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 | 6851 @@ -469,18 +469,18 @@ ORDER BY 2 DESC, 1; (10 rows) -- Aggregate the data into popular_reviewer -INSERT INTO popular_reviewer +INSERT INTO popular_reviewer SELECT day, topn_add_agg(user_id::text) FROM customer_reviews GROUP BY 1; -- Basic topn check on aggregated data -SELECT day, (topn(reviewers, 10)).* -FROM popular_reviewer -WHERE day >= '2018-06-20' and day <= '2018-06-30' +SELECT day, (topn(reviewers, 10)).* +FROM popular_reviewer +WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; - day | item | frequency -------------+------+----------- + day | item | frequency +--------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 06-21-2018 | 1 | 248 @@ -494,15 +494,15 @@ LIMIT 10; (10 rows) -- Union aggregated data for one week -SELECT (topn(agg, 10)).* +SELECT (topn(agg, 10)).* FROM ( SELECT 
topn_union_agg(reviewers) AS agg FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date )a ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -512,7 +512,7 @@ ORDER BY 2 DESC, 1; 6 | 992 (7 rows) -SELECT month, (topn(agg, 5)).* +SELECT month, (topn(agg, 5)).* FROM ( SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -521,8 +521,8 @@ FROM ( ORDER BY 1 )a ORDER BY 1, 3 DESC, 2; - month | item | frequency --------+------+----------- + month | item | frequency +--------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 6 | 3 | 992 @@ -537,12 +537,12 @@ ORDER BY 1, 3 DESC, 2; -- TODO the following queries will be supported after we fix #2265 -- They work for PG9.6 but not for PG10 -SELECT (topn(topn_union_agg(reviewers), 10)).* +SELECT (topn(topn_union_agg(reviewers), 10)).* FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -555,8 +555,8 @@ ORDER BY 2 DESC, 1; SELECT (topn(topn_add_agg(user_id::text), 10)).* FROM customer_reviews ORDER BY 2 DESC, 1; - item | frequency -------+----------- + item | frequency +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 | 6851 diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index 08186ca11..78247223c 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -9,9 +9,9 @@ SET search_path TO disabled_object_propagation; -- verify the table gets created, which requires schema distribution to still work CREATE TABLE t1 (a int PRIMARY KEY , b int); SELECT create_distributed_table('t1','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify types are not created, preventing distributed tables to be created unless created manually on the workers @@ -19,23 +19,23 @@ CREATE TYPE tt1 AS (a int , b int); CREATE TABLE t2 (a int PRIMARY KEY, b tt1); SELECT create_distributed_table('t2', 'a'); ERROR: type "disabled_object_propagation.tt1" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT 1 FROM run_command_on_workers($$ BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; CREATE TYPE disabled_object_propagation.tt1 AS (a int , b int); COMMIT; $$); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify enum types are not created, preventing distributed tables to be created unless created manually on the workers @@ -43,23 +43,23 @@ CREATE TYPE tt2 AS ENUM ('a', 'b'); CREATE TABLE t3 (a int PRIMARY KEY, b tt2); SELECT create_distributed_table('t3', 'a'); ERROR: type "disabled_object_propagation.tt2" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT 1 FROM run_command_on_workers($$ BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; CREATE TYPE disabled_object_propagation.tt2 AS ENUM ('a', 'b'); COMMIT; $$); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t3', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed @@ -69,17 +69,17 @@ SET LOCAL citus.enable_object_propagation TO on; CREATE TYPE tt3 AS (a int, b int); CREATE TABLE t4 (a int PRIMARY KEY, b tt3); SELECT create_distributed_table('t4','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE t4; -- as long as the table is using the type some operations are hard to force COMMIT; -- verify the type is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = 'disabled_object_propagation.tt3'::regtype::oid; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -97,8 +97,8 @@ SELECT row(nspname, typname, usename) JOIN pg_namespace ON (pg_namespace.oid = typnamespace) WHERE typname = 'tt3'; $$); - run_command_on_workers ------------------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(disabled_object_propagation,tt3,postgres)") (localhost,57638,t,"(disabled_object_propagation,tt3,postgres)") (2 rows) @@ -112,8 +112,8 @@ SELECT run_command_on_workers($$ WHERE pg_type.typname = 'tt3' GROUP BY pg_type.typname; $$); - run_command_on_workers ------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(tt3,""a int4, b int4"")") (localhost,57638,t,"(tt3,""a int4, b int4"")") (2 rows) diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 1562f8232..521147e15 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -3,8 +3,8 @@ CREATE USER collationuser; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE USER collationuser;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -23,8 +23,8 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname -------------------+-----------------+---------- + collname | nspname | rolname +--------------------------------------------------------------------- german_phonebook | collation_tests | postgres (1 row) @@ -35,30 +35,30 @@ CREATE TABLE test_propagate(id int, t1 text COLLATE german_phonebook, INSERT INTO test_propagate VALUES (1, 'aesop', U&'\00E4sop'), (2, U&'Vo\1E9Er', 'Vossr'); SELECT create_distributed_table('test_propagate', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Test COLLATE is pushed down SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b'; - id | t1 | t2 -----+-------+------ + id | t1 | t2 +--------------------------------------------------------------------- 1 | aesop | äsop (1 row) SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C"; - id | t1 | t2 -----+------+------- + id | t1 | t2 +--------------------------------------------------------------------- 2 | Voẞr | Vossr (1 row) -- Test range table with collated distribution column CREATE TABLE test_range(key text COLLATE german_phonebook, val int); SELECT create_distributed_table('test_range', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('test_range') AS new_shard_id @@ -76,8 +76,8 @@ SET client_min_messages TO debug; SELECT * FROM test_range WHERE key > 'Ab' AND key < U&'\00E4z'; DEBUG: Creating router plan DEBUG: Plan is router executable - key | val -------+----- + key | val +--------------------------------------------------------------------- äsop | 1 (1 row) @@ -88,8 +88,8 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname --------------------------------+-----------------+---------- + collname | nspname | rolname +--------------------------------------------------------------------- german_phonebook | collation_tests | postgres german_phonebook_unpropagated | collation_tests | postgres (2 rows) @@ -105,8 +105,8 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname --------------------------------+------------------+--------------- + collname | nspname | rolname +--------------------------------------------------------------------- german_phonebook2 | collation_tests2 | collationuser german_phonebook_unpropagated | collation_tests | postgres (2 rows) @@ -127,8 +127,8 @@ DROP SCHEMA collation_tests2 CASCADE; \c - - - :master_port DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); - run_command_on_workers ---------------------------------- + run_command_on_workers 
+--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 77ac86d42..8643ae290 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -1,7 +1,7 @@ CREATE SCHEMA collation_conflict; SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -20,9 +20,9 @@ CREATE COLLATION caseinsensitive ( ); CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -32,8 +32,8 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; - collname | nspname | rolname ------------------+--------------------+---------- + collname | nspname | rolname +--------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres (1 row) @@ -58,9 +58,9 @@ CREATE COLLATION caseinsensitive ( ); CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -70,8 +70,8 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; - collname | nspname | rolname ----------------------------------+--------------------+---------- + collname | nspname | rolname +--------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres caseinsensitive(citus_backup_0) | collation_conflict | postgres (2 rows) @@ -80,14 +80,14 @@ ORDER BY 1,2,3; SET search_path TO collation_conflict; -- now test worker_create_or_replace_object directly SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- f (1 row) SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index b537b8fb2..6e63d15a2 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -3,8 +3,8 @@ CREATE USER functionuser; NOTICE: not 
propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER functionuser;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -130,9 +130,9 @@ CREATE TABLE statement_table(id int2); SET citus.replication_model TO 'statement'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('statement_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create a table uses streaming-based replication (can be synced) @@ -140,38 +140,38 @@ CREATE TABLE streaming_table(id int); SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('streaming_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- make sure that none of the active and primary nodes hasmetadata -- at the start of the test select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or ---------- + bool_or +--------------------------------------------------------------------- f (1 row) -- if not paremeters are supplied, we'd see that function doesn't have -- distribution_argument_index and colocationid SELECT create_distributed_function('"add_mi''xed_param_names"(int, int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT distribution_argument_index is NULL, colocationid is NULL from citus.pg_dist_object WHERE objid = 'add_mi''xed_param_names(int, int)'::regprocedure; - ?column? | ?column? -----------+---------- + ?column? | ?column? 
+--------------------------------------------------------------------- t | t (1 row) -- also show that we can use the function SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_names"(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -179,8 +179,8 @@ SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_n -- make sure that none of the active and primary nodes hasmetadata -- since the function doesn't have a parameter select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or ---------- + bool_or +--------------------------------------------------------------------- f (1 row) @@ -202,54 +202,54 @@ HINT: Set citus.replication_model to 'streaming' before creating distributed ta END; -- try to co-locate with a table that uses streaming replication SELECT create_distributed_function('dup(int)', '$1', colocate_with := 'streaming_table'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.dup(42);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | (42,"42 is text") localhost | 57638 | t | (42,"42 is text") (2 rows) SELECT create_distributed_function('add(int,int)', '$1', colocate_with := 'streaming_table'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) -- distribute aggregate SELECT create_distributed_function('sum2(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('my_rank("any")'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('agg_names(dup_result,dup_result)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- testing alter statements for a distributed function @@ -257,72 +257,72 @@ SELECT create_distributed_function('agg_names(dup_result,dup_result)'); -- ERROR: ROWS is not applicable when function does not return a set 
ALTER FUNCTION add(int,int) CALLED ON NULL INPUT IMMUTABLE SECURITY INVOKER PARALLEL UNSAFE LEAKPROOF COST 5; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) STRICT VOLATILE PARALLEL SAFE; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) -- Test SET/RESET for alter function ALTER FUNCTION add(int,int) SET client_min_messages TO warning; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO error; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO debug; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET client_min_messages; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET "citus.setting;'" TO 'hello '' world'; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET "citus.setting;'"; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET search_path TO 'sch'';ma', public; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -332,8 +332,8 @@ ALTER FUNCTION add(int,int) SET client_min_messages 
FROM CURRENT; ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -341,8 +341,8 @@ ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT SET client_min_messages F ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -350,29 +350,29 @@ ALTER FUNCTION add(int,int) SET client_min_messages FROM CURRENT SECURITY DEFINE ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) -- rename function and make sure the new name can be used on the workers while the old name can't ALTER FUNCTION add(int,int) RENAME TO add2; SELECT public.verify_function_is_same_on_workers('function_tests.add2(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -380,8 +380,8 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER B ALTER FUNCTION add2(int,int) RENAME TO add; ALTER AGGREGATE sum2(int) RENAME TO sum27; SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) @@ -390,8 +390,8 @@ ALTER 
AGGREGATE sum27(int) RENAME TO sum2; -- change the owner of the function and verify the owner has been changed on the workers ALTER FUNCTION add(int,int) OWNER TO functionuser; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -403,8 +403,8 @@ JOIN pg_user ON (usesysid = proowner) JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests') WHERE proname = 'add'; $$); - run_command_on_workers ---------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,add)") (localhost,57638,t,"(functionuser,function_tests,add)") (2 rows) @@ -416,8 +416,8 @@ JOIN pg_user ON (usesysid = proowner) JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests') WHERE proname = 'sum2'; $$); - run_command_on_workers ----------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,sum2)") (localhost,57638,t,"(functionuser,function_tests,sum2)") (2 rows) @@ -426,21 +426,21 @@ $$); -- the new schema has the function. ALTER FUNCTION add(int,int) SET SCHEMA function_tests2; SELECT public.verify_function_is_same_on_workers('function_tests2.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests2.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -454,14 +454,14 @@ AS 'select $1 * $2;' -- I know, this is not an add, but the output will tell us IMMUTABLE RETURNS NULL ON NULL INPUT; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 6 localhost | 57638 | t | 6 (2 rows) @@ -477,8 +477,8 @@ DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extensio DROP FUNCTION 
add(int,int); -- call should fail as function should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) @@ -486,8 +486,8 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY DROP AGGREGATE function_tests2.sum2(int); -- call should fail as aggregate should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+--------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist (2 rows) @@ -495,15 +495,13 @@ SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (sele -- postgres doesn't accept parameter names in the regprocedure input SELECT create_distributed_function('add_with_param_names(val1 int, int)', 'val1'); ERROR: syntax error at or near "int" -LINE 1: SELECT create_distributed_function('add_with_param_names(val... - ^ CONTEXT: invalid type name "val1 int" -- invalid distribution_arg_name SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='test'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='int'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- invalid distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)', '$0'); @@ -522,7 +520,7 @@ SELECT create_distributed_function('add_with_param_names(int, int)', '$1a'); ERROR: invalid input syntax for integer: "1a" -- non existing column name SELECT create_distributed_function('add_with_param_names(int, int)', 'aaa'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- NULL function SELECT create_distributed_function(NULL); @@ -534,67 +532,67 @@ ERROR: colocate_with parameter should not be NULL HINT: To use the default value, set colocate_with option to "default" -- empty string 
distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)', ''); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- The first distributed function syncs the metadata to nodes -- and metadata syncing is not supported within transaction blocks BEGIN; SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- make sure that none of the nodes have the function because we've rollbacked SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) -- make sure that none of the active and primary nodes hasmetadata select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or ---------- + bool_or +--------------------------------------------------------------------- t (1 row) -- valid distribution with distribution_arg_name SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- make sure that the primary nodes are now metadata synced select bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_and ----------- + bool_and +--------------------------------------------------------------------- t (1 row) -- make sure that both of the nodes have the function because we've succeeded SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) -- valid distribution with distribution_arg_name -- case insensitive SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='VaL1'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- valid distribution with distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)','$1'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- a function cannot be colocated with a table that is not "streaming" replicated @@ -602,9 +600,9 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE replicated_table_func_test (a int); SET citus.replication_model TO "statement"; SELECT create_distributed_table('replicated_table_func_test', 'a'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test'); @@ -612,9 +610,9 @@ ERROR: cannot colocate function "add_with_param_names" and table "replicated_ta DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model. HINT: When distributing tables make sure that citus.replication_model = 'streaming' SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) -- a function can be colocated with a different distribution argument type @@ -623,20 +621,20 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE replicated_table_func_test_2 (a bigint); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_2'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- colocate_with cannot be used without distribution key SELECT create_distributed_function('add_with_param_names(int, int)', colocate_with:='replicated_table_func_test_2'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: To provide "colocate_with" option, the distribution argument parameter should also be provided -- a function cannot be colocated with a local table CREATE TABLE replicated_table_func_test_3 (a bigint); @@ -644,9 +642,9 @@ SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', col ERROR: relation replicated_table_func_test_3 is not distributed -- a function cannot be colocated with a reference table SELECT create_reference_table('replicated_table_func_test_3'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_3'); @@ -656,15 +654,15 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE replicated_table_func_test_4 (a int); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_4', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- show that the colocationIds are the same @@ -672,25 +670,25 @@ SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_functi FROM pg_dist_partition, citus.pg_dist_object as objects 
WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; - table_and_function_colocated ------------------------------- + table_and_function_colocated +--------------------------------------------------------------------- t (1 row) -- now, re-distributed with the default colocation option, we should still see that the same colocation -- group preserved, because we're using the default shard creation settings SELECT create_distributed_function('add_with_param_names(int, int)', 'val1'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; - table_and_function_colocated ------------------------------- + table_and_function_colocated +--------------------------------------------------------------------- t (1 row) @@ -699,32 +697,32 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass -- path, we rely on postgres for implicit coersions, and users for explicit coersions -- to coerce the values SELECT create_distributed_function('add_numeric(numeric, numeric)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_numeric(numeric, numeric)'::regprocedure; - table_and_function_colocated ------------------------------- + table_and_function_colocated +--------------------------------------------------------------------- t (1 row) SELECT create_distributed_function('add_text(text, text)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_text(text, text)'::regprocedure; - table_and_function_colocated ------------------------------- + table_and_function_colocated +--------------------------------------------------------------------- t (1 row) @@ -740,18 +738,18 @@ ERROR: cannot distribute the function "add_with_param_names" since there is no HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() -- sync metadata to workers for consistent results when clearing objects SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE test (id int, name text); SELECT 
create_distributed_table('test','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test VALUES (3,'three'); @@ -764,9 +762,9 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('increment(int)', '$1', colocate_with := 'test'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- call a distributed function inside a pl/pgsql function @@ -779,24 +777,24 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT test_func_calls_dist_func(); - test_func_calls_dist_func ---------------------------- - + test_func_calls_dist_func +--------------------------------------------------------------------- + (1 row) SELECT test_func_calls_dist_func(); - test_func_calls_dist_func ---------------------------- - + test_func_calls_dist_func +--------------------------------------------------------------------- + (1 row) -- test an INSERT..SELECT via the coordinator just because it is kind of funky INSERT INTO test SELECT increment(3); SELECT * FROM test ORDER BY id; - id | name -----+------- + id | name +--------------------------------------------------------------------- 3 | three - 4 | + 4 | (2 rows) DROP TABLE test; @@ -805,10 +803,10 @@ DROP SCHEMA function_tests CASCADE; DROP SCHEMA function_tests2 CASCADE; -- clear objects SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; - stop_metadata_sync_to_node ----------------------------- - - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + + (2 rows) -- This is hacky, but we should clean-up the resources as below @@ -828,8 +826,8 @@ DROP SCHEMA function_tests2 CASCADE; \c - - - :master_port DROP USER functionuser; SELECT run_command_on_workers($$DROP USER functionuser$$); - run_command_on_workers ---------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out index 18823379f..995668e64 100644 --- a/src/test/regress/expected/distributed_functions_conflict.out +++ b/src/test/regress/expected/distributed_functions_conflict.out @@ -2,8 +2,8 @@ -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur CREATE SCHEMA proc_conflict; SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -31,9 +31,9 @@ CREATE AGGREGATE existing_agg(int) ( STYPE = int ); SELECT create_distributed_function('existing_agg(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -44,8 +44,8 @@ WITH data (val) AS ( union all select 6 ) SELECT existing_agg(val) FROM data; - existing_agg --------------- + existing_agg +--------------------------------------------------------------------- 78 (1 row) @@ -57,8 +57,8 @@ WITH data 
(val) AS ( union all select 6 ) SELECT existing_agg(val) FROM data; - existing_agg --------------- + existing_agg +--------------------------------------------------------------------- 78 (1 row) @@ -90,9 +90,9 @@ CREATE AGGREGATE existing_agg(int) ( STYPE = int ); SELECT create_distributed_function('existing_agg(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -103,8 +103,8 @@ WITH data (val) AS ( union all select 6 ) SELECT existing_agg(val) FROM data; - existing_agg --------------- + existing_agg +--------------------------------------------------------------------- 76 (1 row) @@ -116,8 +116,8 @@ WITH data (val) AS ( union all select 6 ) SELECT existing_agg(val) FROM data; - existing_agg --------------- + existing_agg +--------------------------------------------------------------------- 76 (1 row) @@ -128,14 +128,14 @@ BEGIN END; $$ LANGUAGE plpgsql STRICT IMMUTABLE; SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out index 31258530c..d819c4294 100644 --- a/src/test/regress/expected/distributed_procedure.out +++ b/src/test/regress/expected/distributed_procedure.out @@ -3,8 +3,8 @@ CREATE USER procedureuser; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE USER procedureuser;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -24,8 +24,8 @@ $proc$; ALTER SYSTEM SET citus.metadata_sync_interval TO 3000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -38,33 +38,33 @@ CREATE TABLE colocation_table(id text); SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('colocation_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('raise_info(text)', '$1', colocate_with := 'colocation_table'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -73,65 +73,65 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex -- ERROR: ROWS is not applicable when function does not return a set ALTER PROCEDURE raise_info(text) SECURITY INVOKER; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SECURITY DEFINER; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) -- Test SET/RESET for alter procedure ALTER PROCEDURE raise_info(text) SET client_min_messages TO warning; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SET client_min_messages TO error; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + 
verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SET client_min_messages TO debug; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) RESET client_min_messages; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) -- rename function and make sure the new name can be used on the workers while the old name can't ALTER PROCEDURE raise_info(text) RENAME TO raise_info2; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) @@ -140,8 +140,8 @@ ALTER PROCEDURE raise_info2(text) RENAME TO raise_info; -- change the owner of the function and verify the owner has been changed on the workers ALTER PROCEDURE raise_info(text) OWNER TO procedureuser; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) @@ -152,8 +152,8 @@ JOIN pg_user ON (usesysid = proowner) JOIN pg_namespace ON (pg_namespace.oid = pronamespace) WHERE proname = 'raise_info'; $$); - run_command_on_workers ------------------------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(procedureuser,procedure_tests,raise_info)") (localhost,57638,t,"(procedureuser,procedure_tests,raise_info)") (2 rows) @@ -162,21 +162,21 @@ $$); -- the new schema has the function. 
ALTER PROCEDURE raise_info(text) SET SCHEMA procedure_tests2; SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(text)'); - verify_function_is_same_on_workers ------------------------------------- + verify_function_is_same_on_workers +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) @@ -185,8 +185,8 @@ ALTER PROCEDURE procedure_tests2.raise_info(text) SET SCHEMA procedure_tests; DROP PROCEDURE raise_info(text); -- call should fail as procedure should have been dropped SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) @@ -194,24 +194,24 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello'); SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) DROP SCHEMA procedure_tests2 CASCADE; SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) DROP USER procedureuser; SELECT run_command_on_workers($$DROP USER procedureuser;$$); - run_command_on_workers ---------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 571e40645..33957dfb4 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -3,8 +3,8 @@ CREATE USER typeuser; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker 
nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER typeuser;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -17,15 +17,15 @@ SET citus.shard_count TO 4; CREATE TYPE tc1 AS (a int, b int); CREATE TABLE t1 (a int PRIMARY KEY, b tc1); SELECT create_distributed_table('t1','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t1 VALUES (1, (2,3)::tc1); SELECT * FROM t1; - a | b ----+------- + a | b +--------------------------------------------------------------------- 1 | (2,3) (1 row) @@ -37,15 +37,15 @@ INSERT INTO t1 VALUES (6, (7,8)::type_tests2.tc1_newname); -- insert with a cast CREATE TYPE te1 AS ENUM ('one', 'two', 'three'); CREATE TABLE t2 (a int PRIMARY KEY, b te1); SELECT create_distributed_table('t2','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t2 VALUES (1, 'two'); SELECT * FROM t2; - a | b ----+----- + a | b +--------------------------------------------------------------------- 1 | two (1 row) @@ -55,8 +55,8 @@ ALTER TYPE te1 RENAME TO te1_newname; ALTER TYPE te1_newname ADD VALUE 'four'; UPDATE t2 SET b = 'four'; SELECT * FROM t2; - a | b ----+------ + a | b +--------------------------------------------------------------------- 1 | four (1 row) @@ -68,15 +68,15 @@ BEGIN; CREATE TYPE tc2 AS (a int, b int); CREATE TABLE t3 (a int PRIMARY KEY, b tc2); SELECT create_distributed_table('t3','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t3 VALUES (4, (5,6)::tc2); SELECT * FROM t3; - a | b ----+------- + a | b +--------------------------------------------------------------------- 4 | (5,6) (1 row) @@ -86,15 +86,15 @@ BEGIN; CREATE TYPE te2 AS ENUM ('yes', 'no'); CREATE TABLE t4 (a int PRIMARY KEY, b te2); SELECT create_distributed_table('t4','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t4 VALUES (1, 'yes'); SELECT * FROM t4; - a | b ----+----- + a | b +--------------------------------------------------------------------- 1 | yes (1 row) @@ -102,14 +102,14 @@ SELECT * FROM t4; COMMIT; -- verify order of enum labels SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype; - string_agg ------------- + string_agg +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$); - run_command_on_workers ------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -124,9 +124,9 @@ CREATE TYPE te3 AS ENUM ('a','b'); RESET citus.enable_ddl_propagation; CREATE TABLE t5 (a int PRIMARY KEY, b tc5[], c te3); SELECT create_distributed_table('t5','a'); - 
create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test adding an attribute to a type and a column to a table both for a non-distributed type @@ -144,36 +144,36 @@ INSERT INTO t5 VALUES (1, NULL, 'a', 'd', (1,2,(4,5)::tc6c)::tc6); -- test renaming an attribute of a distrbuted type and read it by its new name to verify propagation ALTER TYPE tc6 RENAME ATTRIBUTE b TO d; SELECT (e::tc6).d FROM t5 ORDER BY 1; - d ---- + d +--------------------------------------------------------------------- 2 (1 row) -- change owner of supported types and check ownership on remote server ALTER TYPE te4 OWNER TO typeuser; SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- te4 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(te4,typeuser)") (localhost,57638,t,"(te4,typeuser)") (2 rows) ALTER TYPE tc6 OWNER TO typeuser; SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- tc6 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(tc6,typeuser)") (localhost,57638,t,"(tc6,typeuser)") (2 rows) @@ -190,61 +190,61 @@ CREATE TYPE te6 AS ENUM ('a','b','c'); RESET citus.enable_ddl_propagation; CREATE TABLE t6 (a int, b tc8, c te6); SELECT create_distributed_table('t6', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) RESET ROLE; -- test ownership of all types SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- tc7 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(tc7,typeuser)") (localhost,57638,t,"(tc7,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- te5 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers 
+--------------------------------------------------------------------- (localhost,57637,t,"(te5,typeuser)") (localhost,57638,t,"(te5,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- tc8 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(tc8,typeuser)") (localhost,57638,t,"(tc8,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid; - typname | usename ----------+---------- + typname | usename +--------------------------------------------------------------------- te6 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$); - run_command_on_workers --------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"(te6,typeuser)") (localhost,57638,t,"(te6,typeuser)") (2 rows) @@ -257,13 +257,13 @@ DROP TYPE tc3, tc4, tc5 CASCADE; NOTICE: drop cascades to column b of table t5 -- test if the types are deleted SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname; - typname ---------- + typname +--------------------------------------------------------------------- (0 rows) SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) @@ -301,9 +301,9 @@ CREATE TYPE distributed_enum_type AS ENUM ('a', 'c'); -- enforce distribution of types in every case CREATE TABLE type_proc (a int, b distributed_composite_type, c distributed_enum_type); SELECT create_distributed_table('type_proc','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE type_proc; @@ -330,14 +330,14 @@ CREATE TYPE feature_flag_composite_type AS (a int, b int); CREATE TYPE feature_flag_enum_type AS ENUM ('a', 'b'); -- verify types do not exist on workers SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type'); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -345,20 +345,20 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN (' -- verify they are still distributed when required CREATE TABLE feature_flag_table (a int PRIMARY KEY, b feature_flag_composite_type, c 
feature_flag_enum_type); SELECT create_distributed_table('feature_flag_table','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type'); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) @@ -368,24 +368,24 @@ RESET citus.enable_create_type_propagation; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) DROP SCHEMA type_tests2 CASCADE; SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); - run_command_on_workers ---------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out index 8b0459d1d..077b9c6a2 100644 --- a/src/test/regress/expected/distributed_types_conflict.out +++ b/src/test/regress/expected/distributed_types_conflict.out @@ -1,8 +1,8 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -33,15 +33,15 @@ SET search_path TO type_conflict; WHERE pg_class.relname = 'local_table' AND attnum > 0 ORDER BY attnum; - relname | attname | typname --------------+---------+---------------------------------- + relname | attname | typname +--------------------------------------------------------------------- local_table | a | int4 local_table | b | my_precious_type(citus_backup_0) (2 rows) SELECT * FROM local_table; - a | b -----+---------------------------- + a | b +--------------------------------------------------------------------- 42 | ("always bring a towel",t) (1 row) @@ -49,38 +49,38 @@ SELECT * FROM local_table; SET search_path TO type_conflict; -- make sure worker_create_or_replace correctly generates new names while types are existing SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int);'); - worker_create_or_replace_object ---------------------------------- + 
worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int);'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int, d int);'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int);'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int);'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int, d int);'); - worker_create_or_replace_object ---------------------------------- + worker_create_or_replace_object +--------------------------------------------------------------------- t (1 row) @@ -93,8 +93,8 @@ FROM pg_attribute JOIN pg_type AS atttype ON (atttypid = atttype.oid) WHERE pg_type.typname LIKE 'multi_conflicting_type%' GROUP BY pg_type.typname; - typname | fields ------------------------------------------------------------------+-------------------------------- + typname | fields +--------------------------------------------------------------------- multi_conflicting_type | a int4, b int4, c int4, d int4 multi_conflicting_type(citus_backup_0) | a int4, b int4 multi_conflicting_type(citus_backup_1) | a int4, b int4, c int4 diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out index c5e818c36..fbfee78ac 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out @@ -1,7 +1,7 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; - version_above_eleven ----------------------- + version_above_eleven +--------------------------------------------------------------------- t (1 row) @@ -14,15 +14,15 @@ BEGIN; CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no'); CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit); SELECT create_distributed_table('t1','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t1 VALUES (1, 'yes'); SELECT * FROM t1; - a | b ----+----- + a | b +--------------------------------------------------------------------- 1 | yes (1 row) @@ -32,14 +32,14 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe'; ABORT; -- maybe should not be on the workers SELECT string_agg(enumlabel, ',' 
ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; - string_agg ------------- + string_agg +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); - run_command_on_workers ------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -49,14 +49,14 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe'; COMMIT; -- maybe should be on the workers (pg12 and above) SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; - string_agg --------------- + string_agg +--------------------------------------------------------------------- yes,no,maybe (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); - run_command_on_workers ------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"yes,no,maybe") (localhost,57638,t,"yes,no,maybe") (2 rows) @@ -65,8 +65,8 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out index 934dcaf06..398c616c5 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out @@ -1,7 +1,7 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; - version_above_eleven ----------------------- + version_above_eleven +--------------------------------------------------------------------- f (1 row) @@ -14,15 +14,15 @@ BEGIN; CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no'); CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit); SELECT create_distributed_table('t1','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t1 VALUES (1, 'yes'); SELECT * FROM t1; - a | b ----+----- + a | b +--------------------------------------------------------------------- 1 | yes (1 row) @@ -33,14 +33,14 @@ ERROR: ALTER TYPE ... 
ADD cannot run inside a transaction block ABORT; -- maybe should not be on the workers SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; - string_agg ------------- + string_agg +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); - run_command_on_workers ------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -51,14 +51,14 @@ ERROR: ALTER TYPE ... ADD cannot run inside a transaction block COMMIT; -- maybe should be on the workers (pg12 and above) SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; - string_agg ------------- + string_agg +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); - run_command_on_workers ------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -67,8 +67,8 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/dml_recursive.out b/src/test/regress/expected/dml_recursive.out index 3b0b322a4..c159862a5 100644 --- a/src/test/regress/expected/dml_recursive.out +++ b/src/test/regress/expected/dml_recursive.out @@ -3,23 +3,23 @@ SET search_path TO recursive_dml_queries, public; SET citus.next_shard_id TO 2370000; CREATE TABLE recursive_dml_queries.distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_dml_queries.second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_dml_queries.reference_table (id text, name text); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_dml_queries.local_table (id text, name text); @@ -52,10 +52,10 @@ WHERE foo.avg_tenant_id::int::text = reference_table.id RETURNING reference_table.name; -DEBUG: generating subplan 4_1 for subquery 
SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table -DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name - name -------------- +DEBUG: generating subplan XXX_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name + name +--------------------------------------------------------------------- new_user_50 (1 row) @@ -85,10 +85,10 @@ WHERE AND second_distributed_table.dept IN (2) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept; -DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC -DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept - tenant_id | dept ------------+------ +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept + tenant_id | dept 
+--------------------------------------------------------------------- 12 | 18 2 | 18 22 | 18 @@ -135,9 +135,9 @@ FROM WHERE foo.tenant_id != second_distributed_table.tenant_id AND second_distributed_table.dept IN (3); -DEBUG: generating subplan 8_1 for subquery SELECT second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) -DEBUG: generating subplan 8_2 for subquery SELECT DISTINCT foo_inner_1.tenant_id FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4])))) foo_inner_1, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo_inner_2 WHERE (foo_inner_1.tenant_id OPERATOR(pg_catalog.<>) foo_inner_2.tenant_id) -DEBUG: Plan 8 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = ((foo.tenant_id)::integer OPERATOR(pg_catalog./) 4) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 3)) +DEBUG: generating subplan XXX_1 for subquery SELECT second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT foo_inner_1.tenant_id FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4])))) foo_inner_1, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo_inner_2 WHERE (foo_inner_1.tenant_id OPERATOR(pg_catalog.<>) foo_inner_2.tenant_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = ((foo.tenant_id)::integer OPERATOR(pg_catalog./) 4) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 3)) -- we currently do not allow local tables in modification queries UPDATE distributed_table @@ -154,10 +154,10 @@ WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; -DEBUG: generating subplan 11_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM 
recursive_dml_queries.local_table -DEBUG: Plan 11 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info - tenant_id | dept | info ------------+------+------------------------ +DEBUG: generating subplan XXX_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM recursive_dml_queries.local_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info + tenant_id | dept | info +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) @@ -177,10 +177,10 @@ WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; -DEBUG: generating subplan 12_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids -DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info - tenant_id | dept | info ------------+------+------------------------ +DEBUG: generating subplan XXX_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info + tenant_id | dept | info +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) @@ -212,8 +212,8 @@ foo_inner_1 JOIN LATERAL ) foo_inner_2 
ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id) ORDER BY foo_inner_1.tenant_id; - tenant_id ------------ + tenant_id +--------------------------------------------------------------------- 14 24 34 @@ -261,7 +261,7 @@ FROM ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id) ) as foo RETURNING *; -DEBUG: generating subplan 15_1 for subquery SELECT dept FROM recursive_dml_queries.second_distributed_table +DEBUG: generating subplan XXX_1 for subquery SELECT dept FROM recursive_dml_queries.second_distributed_table ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns -- again a corrolated subquery -- this time distribution key eq. exists @@ -297,8 +297,8 @@ ERROR: complex joins are only supported when all distributed tables are co-loca INSERT INTO second_distributed_table (tenant_id, dept) VALUES ('3', (WITH vals AS (SELECT 3) select * from vals)); -DEBUG: generating subplan 20_1 for CTE vals: SELECT 3 -DEBUG: Plan 20 query after replacing subqueries and CTEs: INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept) VALUES ('3'::text, (SELECT vals."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) vals)) +DEBUG: generating subplan XXX_1 for CTE vals: SELECT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept) VALUES ('3'::text, (SELECT vals."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) vals)) ERROR: subqueries are not supported within INSERT queries HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax. 
INSERT INTO @@ -321,8 +321,8 @@ UPDATE distributed_table SET dept = 5 FROM cte_1 WHERE distributed_table.tenant_id < cte_1.tenant_id; -DEBUG: generating subplan 22_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info -DEBUG: Plan 22 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id) +DEBUG: generating subplan XXX_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id) WITH cte_1 AS ( WITH cte_2 AS ( SELECT tenant_id as cte2_id @@ -337,8 +337,8 @@ UPDATE distributed_table SET dept = 5 FROM cte_1 WHERE distributed_table.tenant_id < cte_1.tenant_id; -DEBUG: generating subplan 24_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info -DEBUG: Plan 24 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id) +DEBUG: generating subplan XXX_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id) -- we don't support updating local table with a join with -- distributed tables UPDATE diff --git a/src/test/regress/expected/ensure_no_intermediate_data_leak.out 
b/src/test/regress/expected/ensure_no_intermediate_data_leak.out index 2ed340294..3267501db 100644 --- a/src/test/regress/expected/ensure_no_intermediate_data_leak.out +++ b/src/test/regress/expected/ensure_no_intermediate_data_leak.out @@ -1,17 +1,17 @@ ------- +--------------------------------------------------------------------- -- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF -- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE -- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR -- AND ON THE WORKERS. HOWEVER, WE HAVE SOME ISSUES AROUND -- WINDOWS SUPPORT SO WE DISABLE THIS TEST ON WINDOWS ------- +--------------------------------------------------------------------- SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%'; - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) SELECT * FROM run_command_on_workers($$SELECT pg_ls_dir('base/pgsql_job_cache') r WHERE citus_version() NOT ILIKE '%windows%'$$) WHERE result <> ''; - nodename | nodeport | success | result -----------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/escape_extension_name.out b/src/test/regress/expected/escape_extension_name.out index 1ecd9bb0c..45ca2a9a0 100644 --- a/src/test/regress/expected/escape_extension_name.out +++ b/src/test/regress/expected/escape_extension_name.out @@ -14,8 +14,8 @@ WHERE name = 'uuid-ossp' :uuid_present_command; -- show that the extension is created on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) @@ -25,16 +25,16 @@ DROP EXTENSION "uuid-ossp"; RESET client_min_messages; -- show that the extension is dropped from both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) -- show that extension recreation on new nodes works also fine with extension names that require escaping SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -50,15 +50,15 @@ WHERE name = 'uuid-ossp' :uuid_present_command; -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- show that the extension exists on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) diff --git a/src/test/regress/expected/escape_extension_name_0.out b/src/test/regress/expected/escape_extension_name_0.out index 343520a9a..d27415e4a 100644 --- a/src/test/regress/expected/escape_extension_name_0.out +++ b/src/test/regress/expected/escape_extension_name_0.out @@ -12,15 +12,15 @@ FROM pg_available_extensions() WHERE name = 'uuid-ossp' \gset :uuid_present_command; - uuid_ossp_present -------------------- + uuid_ossp_present +--------------------------------------------------------------------- f (1 row) -- show that the extension is created on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -31,16 +31,16 @@ ERROR: extension "uuid-ossp" does not exist RESET client_min_messages; -- show that the extension is dropped from both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) -- show that extension recreation on new nodes works also fine with extension names that require escaping SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -54,22 +54,22 @@ FROM pg_available_extensions() WHERE name = 'uuid-ossp' \gset :uuid_present_command; - uuid_ossp_present -------------------- + uuid_ossp_present +--------------------------------------------------------------------- f (1 row) -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- show that the extension exists on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) diff --git a/src/test/regress/expected/expression_reference_join.out b/src/test/regress/expected/expression_reference_join.out index d290a27f7..8076d9200 100644 --- a/src/test/regress/expected/expression_reference_join.out +++ b/src/test/regress/expected/expression_reference_join.out @@ -13,16 +13,16 @@ INSERT INTO test VALUES (2,2); SELECT create_reference_table('ref'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test', 'x'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- PR 3180 implements expressions in join clauses to reference tables to support CHbenCHmark queries 7/8/9 @@ -33,8 +33,8 @@ FROM ref a WHERE t2.y * 2 = a.a ORDER BY 1,2,3; - y | x | x | a | b ----+---+---+---+--- + y | x | x | a | b +--------------------------------------------------------------------- 2 | 1 | 1 | 4 | 4 2 | 1 | 2 | 4 | 4 2 | 2 | 1 | 4 | 4 @@ -53,8 +53,8 @@ FROM ref b WHERE t2.y - a.a - b.b = 0 ORDER BY 1,2,3; - y | x | x | a | b | a | b ----+---+---+---+---+---+--- + y | x | x | a | b | a | b +--------------------------------------------------------------------- (0 rows) -- The join clause is wider than it used to be, causing this query to be recognized by the LogicalPlanner as a repartition join. diff --git a/src/test/regress/expected/failure_1pc_copy_append.out b/src/test/regress/expected/failure_1pc_copy_append.out index 44d80cc52..bae1675cb 100644 --- a/src/test/regress/expected/failure_1pc_copy_append.out +++ b/src/test/regress/expected/failure_1pc_copy_append.out @@ -1,7 +1,7 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- do not cache any connections @@ -13,27 +13,27 @@ SET citus.next_shard_id TO 100400; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE copy_test (key int, value int); SELECT create_distributed_table('copy_test', 'key', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT citus.dump_network_traffic(); - dump_network_traffic ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + dump_network_traffic +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 
'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']") (0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'CREATE TABLE public.copy_test (key integer, value integer)'))""]") @@ -58,130 +58,130 @@ SELECT citus.dump_network_traffic(); ---- all of the following tests test behavior with 2 shard placements ---- SHOW citus.shard_replication_factor; - citus.shard_replication_factor --------------------------------- + citus.shard_replication_factor +--------------------------------------------------------------------- 2 (1 row) ---- kill the connection when we try to create the shard ---- SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to start a transaction ---- SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 
100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we start the COPY ---- SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we send the data ---- SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100404 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(1) FROM copy_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
- count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- cancel the connection when we send the data ---- SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -189,8 +189,8 @@ ERROR: canceling statement due to user request SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) @@ -199,82 +199,82 @@ SELECT count(1) FROM copy_test; ERROR: canceling statement due to user request ---- kill the connection when we try to get the size of the table ---- SELECT citus.mitmproxy('conn.onQuery(query="pg_table_size").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to get the min, max of the table ---- SELECT citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to COMMIT ---- SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112 @@ -282,16 +282,16 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p (4 rows) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE copy_test; diff --git a/src/test/regress/expected/failure_1pc_copy_hash.out b/src/test/regress/expected/failure_1pc_copy_hash.out index 0e4d97ec2..1a9d36355 100644 --- 
a/src/test/regress/expected/failure_1pc_copy_hash.out +++ b/src/test/regress/expected/failure_1pc_copy_hash.out @@ -1,7 +1,7 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- do not cache any connections @@ -14,27 +14,27 @@ SET citus.max_cached_conns_per_worker TO 0; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE copy_test (key int, value int); SELECT create_distributed_table('copy_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT citus.dump_network_traffic(); - dump_network_traffic ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + dump_network_traffic +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']") (0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]") @@ -54,103 +54,103 @@ SELECT citus.dump_network_traffic(); -- ==== kill the connection when we try to start a transaction ==== -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as 
essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we try to start the COPY ==== -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we first start sending data ==== -- the query should abort SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the client - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- ==== kill the connection when the worker confirms it's received the data ==== -- the query should abort SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- ==== kill the connection when we try to send COMMIT ==== -- the query should succeed, and the placement should be marked inactive SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- the shard is marked invalid SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT 
count(1) FROM copy_test; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) @@ -169,24 +169,24 @@ ERROR: missing data for column "value" CONTEXT: COPY copy_test, line 5: "10" -- kill the connection if the coordinator sends COMMIT. It doesn't, so nothing changes SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9 && echo 10' WITH CSV; ERROR: missing data for column "value" CONTEXT: COPY copy_test, line 5: "10" SELECT * FROM copy_test ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- ==== clean up some more to prepare for tests with only one replica ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE copy_test; @@ -194,8 +194,8 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_por SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) @@ -203,8 +203,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( -- ==== okay, run some tests where there's only one active shard ==== COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -213,13 +213,13 @@ SELECT * FROM copy_test; -- the worker is unreachable SELECT citus.mitmproxy('conn.killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -227,14 +227,14 @@ CONTEXT: COPY copy_test, line 1: "0, 0" ERROR: could not connect to any active placements CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -243,26 +243,26 @@ SELECT * FROM copy_test; -- the first message fails SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -271,26 +271,26 @@ SELECT * FROM copy_test; -- the COPY message fails SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -299,22 +299,22 @@ SELECT * FROM copy_test; -- the COPY data fails SELECT citus.mitmproxy('conn.onCopyData().killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -323,28 +323,28 @@ SELECT * FROM copy_test; -- the COMMIT fails SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: could not commit transaction for shard 100400 on any active node +CONTEXT: while executing command on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -355,45 +355,45 @@ SELECT * FROM copy_test; SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) -- the COMMIT makes it through but the connection dies before we get a response SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on 
localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: could not commit transaction for shard 100400 on any active node +CONTEXT: while executing command on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) SELECT * FROM copy_test; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -406,9 +406,9 @@ SELECT * FROM copy_test; -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE copy_test; diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index d64542f6e..473df0f4a 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -5,64 +5,64 @@ -- tested as they don't create network activity -- SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.next_shard_id TO 200000; -- verify we have all worker nodes present SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) -- verify there are no tables that could prevent add/remove node operations SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- (0 rows) CREATE SCHEMA add_remove_node; SET SEARCH_PATH=add_remove_node; CREATE TABLE user_table(user_id int, user_name text); SELECT create_reference_table('user_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE event_table(user_id int, event_id int, event_name text); SELECT create_distributed_table('event_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 
'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) SELECT master_disable_node('localhost', :worker_2_proxy_port); -NOTICE: Node localhost:9060 has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. - master_disable_node ---------------------- - +NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -70,35 +70,35 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -106,28 +106,28 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail create schema command SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -135,26 +135,26 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: canceling statement due to user request -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -162,15 +162,15 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- master_remove_node fails when there are shards on that worker @@ -179,16 +179,16 @@ ERROR: you cannot remove the primary node of a node group which has shard place -- drop event table and re-run remove DROP TABLE event_table; SELECT master_remove_node('localhost', :worker_2_proxy_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- verify node is removed SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -196,8 +196,8 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) @@ -205,45 +205,45 @@ ORDER BY placementid; -- it does not create any network activity therefore can not -- be injected failure through network SELECT master_add_inactive_node('localhost', :worker_2_proxy_port); - master_add_inactive_node --------------------------- + master_add_inactive_node +--------------------------------------------------------------------- 3 (1 row) SELECT master_remove_node('localhost', :worker_2_proxy_port); - master_remove_node --------------------- - + master_remove_node 
+--------------------------------------------------------------------- + (1 row) SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) -- test master_add_node replicated a reference table -- to newly added node. SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -251,25 +251,25 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: canceling statement due to user request -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -277,30 +277,30 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 (1 row) -- reset cluster to original state SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 - master_add_node ------------------ +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx + master_add_node +--------------------------------------------------------------------- 6 (1 row) -- verify node is added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port 
+--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) @@ -309,55 +309,55 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) -- fail master_add_node by failing copy out operation SELECT master_remove_node('localhost', :worker_1_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:57637 -ERROR: could not copy table "user_table_200000" from "localhost:9060" -CONTEXT: while executing command on localhost:57637 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +ERROR: could not copy table "user_table_200000" from "localhost:xxxxx" +CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 9060 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:57637 - master_add_node ------------------ +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx + master_add_node +--------------------------------------------------------------------- 8 (1 row) -- verify node is added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) @@ -366,8 +366,8 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) @@ -377,8 +377,8 @@ DROP SCHEMA add_remove_node CASCADE; NOTICE: drop cascades to table add_remove_node.user_table SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') ORDER BY nodeport; - nodename | nodeport | success | result ------------+----------+---------+------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 9060 | t | DROP SCHEMA localhost | 57637 | t | DROP SCHEMA (2 rows) diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index bbb430e22..778fcad8e 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ 
b/src/test/regress/expected/failure_connection_establishment.out @@ -6,9 +6,9 @@ -- - timeout -- SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE SCHEMA fail_connect; @@ -23,9 +23,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add primary key constraint on distribution column (or group of columns @@ -38,17 +38,17 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const -- into connection establishment problems SET citus.node_connection_timeout TO 400; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); -ERROR: could not establish any connections to the node localhost:9060 after 400 ms +ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE r1 ( @@ -61,26 +61,26 @@ INSERT INTO r1 (id, name) VALUES (3,'baz'); SELECT create_reference_table('r1'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we cannot control which replica of the reference table will be queried and there is -- only one specific client we can control the connection for. --- by using round-robin task_assignment_policy we can force to hit both machines. +-- by using round-robin task_assignment_policy we can force to hit both machines. -- and in the end, dumping the network traffic shows that the connection establishment -- is initiated to the node behind the proxy SET client_min_messages TO ERROR; @@ -88,135 +88,135 @@ SET citus.task_assignment_policy TO 'round-robin'; -- suppress the warning since we can't control which shard is chose first. Failure of this -- test would be if one of the queries does not return the result but an error. 
SELECT name FROM r1 WHERE id = 2; - name ------- + name +--------------------------------------------------------------------- bar (1 row) SELECT name FROM r1 WHERE id = 2; - name ------- + name +--------------------------------------------------------------------- bar (1 row) -- verify a connection attempt was made to the intercepted node, this would have cause the -- connection to have been delayed and thus caused a timeout SELECT citus.dump_network_traffic(); - dump_network_traffic -------------------------------------- + dump_network_traffic +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) --- similar test with the above but this time on a +-- similar test with the above but this time on a -- distributed table instead of a reference table -- and with citus.force_max_query_parallelization is set SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- suppress the warning since we can't control which shard is chose first. Failure of this -- test would be if one of the queries does not return the result but an error. SELECT count(*) FROM products; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM products; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- use OFFSET 1 to prevent printing the line where source +-- use OFFSET 1 to prevent printing the line where source -- is the worker SELECT citus.dump_network_traffic() ORDER BY 1 OFFSET 1; - dump_network_traffic -------------------------------------- + dump_network_traffic +--------------------------------------------------------------------- (1,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 1; CREATE TABLE single_replicatated(key int); SELECT create_distributed_table('single_replicatated', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- this time the table is single replicated and we're still using the -- the max parallelization flag, so the query should fail SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM single_replicatated; -ERROR: could not establish any connections to the node localhost:9060 after 400 ms +ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SET citus.force_max_query_parallelization TO OFF; -- one similar test, but this time on modification queries -- to see that connection establishement failures could -- mark placement INVALID SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; -SELECT +SELECT count(*) as invalid_placement_count -FROM - pg_dist_shard_placement 
-WHERE - shardstate = 3 AND +FROM + pg_dist_shard_placement +WHERE + shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); - invalid_placement_count -------------------------- + invalid_placement_count +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO products VALUES (100, '100', 100); COMMIT; -SELECT +SELECT count(*) as invalid_placement_count -FROM - pg_dist_shard_placement -WHERE - shardstate = 3 AND +FROM + pg_dist_shard_placement +WHERE + shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); - invalid_placement_count -------------------------- + invalid_placement_count +--------------------------------------------------------------------- 1 (1 row) -- show that INSERT went through SELECT count(*) FROM products WHERE product_no = 100; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -224,15 +224,15 @@ RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection SELECT get_global_active_transactions(); WARNING: could not establish connection after 400 ms -WARNING: connection error: localhost:9060 - get_global_active_transactions --------------------------------- +WARNING: connection error: localhost:xxxxx + get_global_active_transactions +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.node_connection_timeout TO DEFAULT; diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 9f413641b..b867005af 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -5,9 +5,9 @@ CREATE SCHEMA copy_distributed_table; SET search_path TO 'copy_distributed_table'; SET citus.next_shard_id TO 1710000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- With one placement COPY should error out and placement should stay healthy. @@ -16,27 +16,27 @@ SET citus.shard_count to 4; SET citus.max_cached_conns_per_worker to 0; CREATE TABLE test_table(id int, value_1 int); SELECT create_distributed_table('test_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='copy_distributed_table.test_table'::regclass AND shardstate != 1; -- Just kill the connection after sending the first query to the worker. 
SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -44,157 +44,157 @@ CONTEXT: COPY test_table, line 1: "1,2" ERROR: could not connect to any active placements CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Now, kill the connection while copying the data SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710000 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Similar to the above one, but now cancel the connection -- instead of killing it. 
SELECT citus.mitmproxy('conn.onCopyData().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill the connection after worker sends command complete message SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710002 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- similar to above one, but cancel the connection on command complete SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill the connection on PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -202,59 +202,59 @@ SELECT count(*) FROM test_table; SET client_min_messages TO ERROR; -- kill on command complete on COMMIT PREPARE, command should succeed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy ------------ - + 
mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) TRUNCATE TABLE test_table; -- kill on ROLLBACK, command could be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; \COPY test_table FROM stdin delimiter ','; ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -264,42 +264,42 @@ NOTICE: drop cascades to view unhealthy_shard_count SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table_2 FROM stdin delimiter ','; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 1: "1,2" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 2: "3,4" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 3: "6,7" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
CONTEXT: COPY test_table_2, line 5: "9,10" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -308,8 +308,8 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate ---------------+---------+------------ + logicalrelid | shardid | shardstate +--------------------------------------------------------------------- test_table_2 | 1710004 | 3 test_table_2 | 1710004 | 1 test_table_2 | 1710005 | 3 @@ -324,29 +324,29 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- Kill the connection when we try to start the COPY +-- Kill the connection when we try to start the COPY -- The query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table_2 FROM stdin delimiter ','; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -355,8 +355,8 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate ---------------+---------+------------ + logicalrelid | shardid | shardstate +--------------------------------------------------------------------- test_table_2 | 1710008 | 1 test_table_2 | 1710008 | 1 test_table_2 | 1710009 | 1 @@ -371,26 +371,26 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- When kill on copying data, it will be rollbacked and placements won't be labaled as invalid. --- Note that now we sent data to shard 210007, yet it is not marked as invalid. +-- Note that now we sent data to shard xxxxx, yet it is not marked as invalid. 
-- You can check the issue about this behaviour: https://github.com/citusdata/citus/issues/1933 SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710012 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -399,8 +399,8 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate ---------------+---------+------------ + logicalrelid | shardid | shardstate +--------------------------------------------------------------------- test_table_2 | 1710012 | 1 test_table_2 | 1710012 | 1 test_table_2 | 1710013 | 1 diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index 77f854912..e34d24f3f 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -1,384 +1,384 @@ --- --- Failure tests for COPY to reference tables --- +-- +-- Failure tests for COPY to reference tables +-- CREATE SCHEMA copy_reference_failure; SET search_path TO 'copy_reference_failure'; SET citus.next_shard_id TO 130000; -- we don't want to see the prepared transaction numbers in the warnings SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table(id int, value_1 int); SELECT create_reference_table('test_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='copy_reference_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends begin SELECT 
citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the worker sends CopyComplete SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failed to COPY to shard 130000 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends CopyData SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- kill the connection when we try to start the COPY +-- kill the connection when we try to start the COPY -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while 
processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) TRUNCATE test_table; -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy 
------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) \copy test_table FROM STDIN DELIMITER ',' SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) --- Since we kill connections to one worker after commit arrives but the +-- Since we kill connections to one worker after commit arrives but the -- other worker connections are healthy, we cannot commit on 1 worker -- which has 1 active shard placements, but the other does. That's why -- we expect to see 1 recovered prepared transactions. SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -386,9 +386,9 @@ TRUNCATE test_table; -- finally, test failing on ROLLBACK just after the coordinator -- sends the ROLLBACK so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -396,32 +396,32 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, command should have been rollbacked -- both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -429,28 +429,28 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) diff --git 
a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 8b5acb091..9402dde3e 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -1,6 +1,6 @@ --- --- Failure tests for COPY to reference tables --- +-- +-- Failure tests for COPY to reference tables +-- -- We have to keep two copies of this failure test -- because if the shards are created via the executor -- cancellations are processed, otherwise they are not @@ -8,9 +8,9 @@ CREATE SCHEMA create_distributed_table_non_empty_failure; SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we'll start with replication factor 1 and 2pc @@ -20,58 +20,58 @@ CREATE TABLE test_table(id int, value_1 int); INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- in the first test, kill the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- in the first test, cancel the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends CREATE SCHEMA SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) @@ -81,100 +81,100 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata -- does not check for interrupts until GetRemoteCommandResult is called. -- Since we already sent the command at this stage, the schemas get created in workers SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,"DROP SCHEMA") (localhost,57637,t,"DROP SCHEMA") (2 rows) -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) -- cancel as soon as the coordinator sends begin -- if the shards are created via the executor, the table creation will fail --- otherwise shards will be created because we ignore cancel requests during the shard creation +-- otherwise shards will be created because we ignore cancel requests during the shard creation -- Interrupts are hold in CreateShardsWithRoundRobinPolicy SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -184,85 +184,85 @@ CREATE TABLE test_table(id int, value_1 int); INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- kill as soon as the coordinator sends CREATE TABLE SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); NOTICE: Copying data from local table... -ERROR: failed to COPY to shard 11000016 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- cancel as soon as the coordinator sends COPY, table +-- cancel as soon as the coordinator sends COPY, table -- should not be created and rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); NOTICE: Copying data from local table... 
ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -271,77 +271,77 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- we don't want to see the prepared transaction numbers in the warnings SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- immediately cancel when we see prepare transaction to see if the command -- successfully rollbacked the created shards SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) -- kill as soon as the coordinator sends COMMIT -- shards should be created and kill should not affect SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 2 (1 row) @@ -355,26 +355,26 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- cancel as soon as the coordinator sends COMMIT -- shards should be created and kill should not affect SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").cancel(' || pg_backend_pid() || ')'); - 
mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -384,50 +384,50 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- kill as soon as the coordinator sends ROLLBACK -- the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends ROLLBACK --- should be rollbacked +-- should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -435,25 +435,25 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- testing for co-located tables. CREATE TABLE colocated_table(id int, value_1 int); SELECT create_distributed_table('colocated_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Now, cancel the connection just after transaction is opened on -- workers. 
Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -461,25 +461,25 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -488,16 +488,16 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -505,25 +505,25 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. 
Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -532,16 +532,16 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -549,34 +549,34 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Now run the same tests with 1pc SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE colocated_table; @@ -587,130 +587,130 @@ CREATE TABLE test_table(id int, value_1 int); INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); SET citus.multi_shard_commit_protocol TO '1pc'; SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- in the first test, cancel the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy 
+--------------------------------------------------------------------- + (1 row) CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) -- cancel as soon as the coordinator sends begin -- if the shards are created via the executor, the table creation will fail --- otherwise shards will be created because we ignore cancel requests during the shard creation +-- otherwise shards will be created because we ignore cancel requests during the shard creation -- Interrupts are hold in CreateShardsWithRoundRobinPolicy SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -720,155 +720,155 @@ CREATE TABLE test_table(id int, value_1 int); INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- kill as soon as the coordinator sends CREATE TABLE SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: failed to COPY to shard 11000096 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- cancel as soon as the coordinator sends COPY, table +-- cancel as soon as the coordinator sends COPY, table -- should not be created and rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends ROLLBACK -- the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; SELECT count(*) FROM pg_dist_shard WHERE 
logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends ROLLBACK --- should be rollbacked +-- should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COMMIT -- the command can be COMMITed SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -878,28 +878,28 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- cancel as soon as the coordinator sends COMMIT -- should be COMMITed SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -908,25 +908,25 @@ CREATE TABLE test_table(id int, value_1 int); INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); CREATE TABLE colocated_table(id int, value_1 int); SELECT create_distributed_table('colocated_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Now, cancel the connection just after transaction is opened on -- workers. 
Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -934,19 +934,19 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -954,16 +954,16 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -971,33 +971,33 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- workers. Note that, when there is a colocated table, interrupts -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
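The cancel variants in the surrounding hunks swap the rule but keep everything else: instead of dropping the connection, the rule interpolates the coordinator backend's pg_backend_pid() into cancel(), and the statement then fails with "canceling statement due to user request" rather than a connection error. A sketch of one such variant, again built only from statements that already appear in these hunks:

```sql
-- cancel (rather than kill) as soon as the worker receives the COPY
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')');

-- expected to fail with: ERROR: canceling statement due to user request
SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');

-- nothing should have been recorded in the shard metadata
SELECT count(*) FROM pg_dist_shard
WHERE logicalrelid = 'create_distributed_table_non_empty_failure.test_table'::regclass;
```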
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP SCHEMA create_distributed_table_non_empty_failure CASCADE; diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index a93d6e784..1e53e7d4f 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -3,9 +3,9 @@ -- test create index concurrently command -- failure. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 4; -- two per worker @@ -13,16 +13,16 @@ CREATE SCHEMA index_schema; SET SEARCH_PATH=index_schema; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); @@ -30,32 +30,32 @@ ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- verify index is not created SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 9060 | t | 0 (1 row) DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_reference_table('index_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); @@ -63,26 +63,26 @@ ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- cancel the connection when create command is issued -- network traffic may differ between execution during cancellation -- therefore dump_network_traffic() calls are not made SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); @@ -90,24 +90,24 @@ ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_reference_table('index_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- cancel the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); @@ -115,25 +115,25 @@ ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; @@ -141,16 +141,16 @@ ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- verify index is not dropped at worker 2 SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 9060 | t | 4 (1 row) @@ -160,8 +160,8 @@ NOTICE: drop cascades to table index_schema.index_test -- verify index is not at worker 2 upon cleanup SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 9060 | t | 0 (1 row) diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index 9cd6b4a97..a9d15187c 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -5,9 +5,9 @@ CREATE SCHEMA failure_reference_table; SET search_path TO 'failure_reference_table'; SET citus.next_shard_id TO 10000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- this is merely used to get the schema creation propagated. Without there are failures @@ -19,117 +19,117 @@ INSERT INTO ref_table VALUES(1),(2),(3); -- Kill on sending first query to worker node, should error -- out and not create any placement SELECT citus.mitmproxy('conn.onQuery().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Kill after creating transaction on worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Cancel after creating transaction on worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Kill after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Cancel after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Kill after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... -ERROR: failed to COPY to shard 10000005 on localhost:9060 +ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Cancel after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -138,51 +138,51 @@ SET client_min_messages TO ERROR; -- Kill after preparing transaction. Since we don't commit after preparing, we recover -- prepared transaction afterwards. 
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) -- Kill after commiting prepared, this should succeed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport; - shardid | nodeport | shardstate -----------+----------+------------ + shardid | nodeport | shardstate +--------------------------------------------------------------------- 10000008 | 9060 | 1 10000008 | 57637 | 1 (2 rows) SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE ref_table; @@ -192,9 +192,9 @@ CREATE TABLE ref_table(id int); INSERT INTO ref_table VALUES(1),(2),(3); -- Test in transaction SELECT citus.mitmproxy('conn.onQuery().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -202,52 +202,52 @@ SELECT create_reference_table('ref_table'); WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx COMMIT; -- kill on ROLLBACK, should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... 
- create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- (0 rows) -- cancel when the coordinator send ROLLBACK, should be rollbacked. We ignore cancellations -- during the ROLLBACK. SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- (0 rows) DROP SCHEMA failure_reference_table CASCADE; diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 9a74ae556..fe839b791 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -4,41 +4,41 @@ CREATE SCHEMA failure_create_table; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 1; SET citus.shard_count to 4; CREATE TABLE test_table(id int, value_1 int); --- Kill connection before sending query to the worker +-- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -48,31 +48,31 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- be created only on the node which is not behind the proxy. -- https://github.com/citusdata/citus/pull/1652 SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'failure_create_table'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) @@ -83,62 +83,62 @@ CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- Now, kill the connection while opening transaction on workers. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -148,32 +148,32 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. COMMIT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -182,28 +182,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers. Note that, cancel requests will be ignored during -- shard creation. 
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -213,120 +213,120 @@ CREATE TABLE test_table(id int, value_1 int); -- Kill and cancel the connection with colocate_with option while sending the create table command CREATE TABLE temp_table(id int, value_1 int); SELECT create_distributed_table('temp_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) 
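Failures injected after a worker has acknowledged PREPARE TRANSACTION are the 2PC-specific case in these files: the coordinator reports "connection not open", the new table gains no shards (the count of 4 above belongs to the pre-existing temp_table), and elsewhere in these expected files the same kind of failure is followed by recover_prepared_transactions() to clean up what the worker prepared. A sketch that combines the kill rule from this hunk with that recovery step (the combination is illustrative, not a literal copy of one test):

```sql
-- drop the connection right after the worker acknowledges PREPARE TRANSACTION
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');

-- expected to fail with: ERROR: connection not open
SELECT create_distributed_table('test_table', 'id', colocate_with => 'temp_table');

SELECT citus.mitmproxy('conn.allow()');

-- only temp_table's shards remain; any orphaned prepared transaction gets recovered
SELECT count(*) FROM pg_dist_shard;
SELECT recover_prepared_transactions();
```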
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -338,35 +338,35 @@ DROP SCHEMA failure_create_table; CREATE SCHEMA failure_create_table; CREATE TABLE test_table(id int, value_1 int); -- Test inside transaction --- Kill connection before sending query to the worker +-- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -377,33 +377,33 @@ CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- Now, kill the connection while creating transaction on workers in transaction. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -413,9 +413,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- shard creation again in transaction if we're not relying on the -- executor. 
So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -423,26 +423,26 @@ SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -456,66 +456,66 @@ CREATE TABLE test_table(id int, value_1 int); SET citus.multi_shard_commit_protocol TO "1pc"; -- Kill connection before sending query to the worker with 1pc. SELECT citus.mitmproxy('conn.kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Kill connection while sending create table command with 1pc. SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -526,33 +526,33 @@ CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- Now, kill the connection while opening transactions on workers with 1pc. Transaction will be opened due to BEGIN. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -561,9 +561,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers with 1pc. Note that, cancel requests will be ignored during -- shard creation unless the executor is used. 
So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -571,26 +571,26 @@ SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -602,108 +602,108 @@ CREATE SCHEMA failure_create_table; SET citus.multi_shard_commit_protocol TO "2pc"; CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Kill the connection after worker sends "PREPARE TRANSACTION" ack SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) -- Cancel the connection after sending prepare transaction in master_create_worker_shards SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: canceling statement due to user request -- Show that there is no pending transaction SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_shard; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 9bf9e2531..6ae86cfd9 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ 
b/src/test/regress/expected/failure_cte_subquery.out @@ -7,23 +7,23 @@ SELECT pg_backend_pid() as pid \gset CREATE TABLE users_table (user_id int, user_name text); CREATE TABLE events_table(user_id int, event_id int, event_type int); SELECT create_distributed_table('users_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('events_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE users_table_local AS SELECT * FROM users_table; -- kill at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -35,29 +35,29 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -69,29 +69,29 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
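The CTE tests in failure_cte_subquery.out reuse a single query and only move the interception point: one rule fires on the COPY that pushes the intermediate result, and two fire on the worker-side SELECTs that pull data back, matched against the text of the query the worker receives. The three rules, copied from these hunks with the test's own labels as comments:

```sql
-- kill at the first copy (push)
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');

-- kill at the second copy (pull)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()');

-- kill at the third copy (pull)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()');
```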
-- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -103,29 +103,29 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -137,26 +137,26 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- cancel at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -168,26 +168,26 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- cancel at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte AS ( @@ -199,34 +199,34 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE 
foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- distributed update tests SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- insert some rows INSERT INTO users_table VALUES (1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E'); INSERT INTO events_table VALUES (1,1,1), (1,2,1), (1,3,1), (2,1, 4), (3, 4,1), (5, 1, 2), (5, 2, 1), (5, 2,2); SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -239,8 +239,8 @@ WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURN INSERT INTO users_table SELECT * FROM cte_delete; -- verify contents are the same SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -250,27 +250,27 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -280,9 +280,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -290,17 +290,17 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -310,9 +310,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -320,14 +320,14 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -337,9 +337,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -347,14 +347,14 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name ----------+----------- + user_id | user_name +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -364,25 +364,25 @@ SELECT * FROM users_table ORDER BY 1, 2; -- test sequential delete/insert SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP SCHEMA cte_failure CASCADE; diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index dd63b47a1..8b31e291c 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -1,8 +1,8 @@ --- --- Test DDL command propagation failures +-- +-- Test DDL command propagation failures -- Different dimensions we're testing: -- Replication factor, 1PC-2PC, sequential-parallel modes --- +-- CREATE SCHEMA ddl_failure; SET citus.force_max_query_parallelization TO ON; SET search_path TO 'ddl_failure'; @@ -11,9 +11,9 @@ SET citus.max_cached_conns_per_worker TO 0; -- we don't want to see the prepared transaction numbers in the warnings SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.next_shard_id TO 100800; @@ -23,142 +23,142 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- since we've killed the connection just after -- the coordinator sends the COMMIT, the command should be applied -- to the distributed table and the shards on the other worker --- however, there is no way to recover the failure on the shards +-- however, there is no way to recover the failure on the shards -- that live in the failed worker, since we're running 1PC SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT 
array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -173,36 +173,36 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- cancel as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) --- interrupts are held during COMMIT/ROLLBACK, so the command +-- interrupts are held during COMMIT/ROLLBACK, so the command -- should have been applied without any issues since cancel is ignored SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -211,66 +211,66 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- the following tests rely the column not exists, so drop manually ALTER TABLE test_table DROP COLUMN new_column; --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- COMMIT command, so we'll have lots of warnings but the command -- should have been committed both on the distributed table and the placements SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx 
+WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: could not commit transaction for shard 100802 on any active node -WARNING: could not commit transaction for shard 100800 on any active node +CONTEXT: while executing command on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO ERROR; SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- now cancel just after the worker sends response to +-- now cancel just after the worker sends response to -- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- the remaining tests rely on table having new_column @@ -279,9 +279,9 @@ ALTER TABLE test_table ADD COLUMN new_column INT; -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -289,183 +289,183 @@ SET LOCAL client_min_messages TO WARNING; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 --- now cancel just after the worker sends response to +CONTEXT: while executing command on localhost:xxxxx +-- now cancel just after the worker sends response to -- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE 
test_table DROP COLUMN new_column; ROLLBACK; --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- now, lets test with 2PC +-- now, lets test with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -475,14 +475,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -491,28 +491,28 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + 
mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -522,14 +522,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -539,27 +539,27 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,value}") @@ -568,14 +568,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we shouldn't have any prepared transactions in the workers SELECT 
recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,value}") @@ -584,29 +584,29 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- some of the placements would be missing the new column -- since we've not commited the prepared transactions SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -616,14 +616,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is committed SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -634,64 +634,64 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy 
+--------------------------------------------------------------------- + (1 row) -- ROLLBACK should have failed on the distributed table and the placements SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- make sure that the transaction is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -706,142 +706,142 @@ SET citus.shard_replication_factor = 2; DROP TABLE test_table; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we should be able to recover the transaction and -- see that the command is rollbacked on all workers -- note that in this case recover_prepared_transactions() --- sends ROLLBACK PREPARED to the workers given that +-- sends ROLLBACK PREPARED to the workers given that -- the transaction has not been commited on any placement yet SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -855,27 +855,27 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg ------------------------- + array_agg +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid 
= '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -888,14 +888,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------------------ + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -908,29 +908,29 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- some of the placements would be missing the new column -- since we've not commited the prepared transactions SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ----------------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -944,14 +944,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is committed SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -966,30 +966,30 @@ SELECT 
run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE test_table ADD COLUMN new_column INT; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- ROLLBACK should have failed on the distributed table and the placements SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -1000,34 +1000,34 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD (localhost,57637,100807,t,"{key,value}") (8 rows) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE test_table ADD COLUMN new_column INT; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- make sure that the transaction is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements ------------------------------------------- + run_command_on_placements +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -1042,66 +1042,66 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD SET citus.multi_shard_modify_mode TO 'sequential'; -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the 
server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg -------------- + array_agg +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index a27290627..b92b49b36 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -4,9 +4,9 @@ -- performs failure/cancellation test for insert/select pushed down to shards. 
-- SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE SCHEMA insert_select_pushdown; @@ -17,131 +17,131 @@ SELECT pg_backend_pid() as pid \gset CREATE TABLE events_table(user_id int, event_id int, event_type int); CREATE TABLE events_summary(user_id int, event_id int, event_count int); SELECT create_distributed_table('events_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('events_summary', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2); -SELECT count(*) FROM events_summary; - count -------- +SELECT count(*) FROM events_summary; + count +--------------------------------------------------------------------- 0 (1 row) -- insert/select from one distributed table to another -- kill worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_summary; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- cancel worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_summary; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- test self insert/select SELECT count(*) FROM events_table; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) -- kill worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_table SELECT * FROM events_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
--verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_table; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) -- cancel worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_table SELECT * FROM events_table; ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_table; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP SCHEMA insert_select_pushdown CASCADE; diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out index 735194327..b46a73c27 100644 --- a/src/test/regress/expected/failure_insert_select_via_coordinator.out +++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out @@ -14,145 +14,145 @@ CREATE TABLE events_summary(event_id int, event_type int, event_count int); CREATE TABLE events_reference(event_type int, event_count int); CREATE TABLE events_reference_distributed(event_type int, event_count int); SELECT create_distributed_table('events_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('events_summary', 'event_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('events_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('events_reference_distributed', 'event_type'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2); -SELECT count(*) FROM events_summary; - count -------- +SELECT count(*) FROM events_summary; + count +--------------------------------------------------------------------- 0 (1 row) -- insert/select from one distributed table to another -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_summary; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- insert into reference table from a distributed table -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_reference; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -161,64 +161,64 @@ SELECT count(*) FROM events_reference; INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM events_reference_distributed; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP SCHEMA coordinator_insert_select CASCADE; diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index 0552e8e80..89f48536a 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -1,7 +1,7 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 2; @@ -10,30 +10,30 @@ SET citus.next_shard_id TO 103400; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE dml_test (id integer, name text); SELECT create_distributed_table('dml_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COPY dml_test FROM STDIN WITH CSV; SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) ---- test multiple statements spanning multiple shards, ---- at each significant point. These transactions are 2pc -- fail at DELETE SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; DELETE FROM dml_test WHERE id = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -48,8 +48,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes performed in failed transaction SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -58,9 +58,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at DELETE SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -77,8 +77,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes performed in failed transaction SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -87,16 +87,16 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at INSERT SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -107,8 +107,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes before failed INSERT SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -117,9 +117,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at INSERT SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -134,8 +134,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes before failed INSERT SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -144,9 +144,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at UPDATE SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -154,7 +154,7 @@ DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -163,8 +163,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes after failed UPDATE SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -173,9 +173,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at UPDATE SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -189,8 +189,8 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; --- shouldn't see any changes after failed UPDATE SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -199,9 +199,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- this transaction block will be sent to the coordinator as a remote command to hide the @@ -221,32 +221,32 @@ COMMIT; '], false ); - master_run_on_worker ---------------------------- + master_run_on_worker +--------------------------------------------------------------------- (localhost,57636,t,BEGIN) (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- shouldn't see any changes after failed PREPARE SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -255,9 +255,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we'll test for the txn side-effects to ensure it didn't run @@ -270,26 +270,26 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- shouldn't see any changes after failed PREPARE SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name 
+--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -298,9 +298,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- hide the error message (it has the PID)... @@ -315,26 +315,26 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; SET client_min_messages TO DEFAULT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) -- should see changes, because of txn recovery SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+--------- + id | name +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon @@ -342,9 +342,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at COMMITs are ignored by Postgres SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -356,8 +356,8 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; -- should see changes, because cancellation is ignored SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+--------- + id | name +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon @@ -370,18 +370,18 @@ SET citus.shard_count = 1; SET citus.shard_replication_factor = 2; -- two placements CREATE TABLE dml_test (id integer, name text); SELECT create_distributed_table('dml_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COPY dml_test FROM STDIN WITH CSV; ---- test multiple statements against a single shard, but with two placements -- fail at COMMIT (actually COMMIT this time, as no 2pc in use) SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -392,29 +392,29 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- should see all changes, but they only went to one placement (other is unhealthy) SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+--------- + id | name +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon (3 rows) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; - shardid 
---------- + shardid +--------------------------------------------------------------------- 103402 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- drop table and recreate as reference table @@ -423,17 +423,17 @@ SET citus.shard_count = 2; SET citus.shard_replication_factor = 1; CREATE TABLE dml_test (id integer, name text); SELECT create_reference_table('dml_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) COPY dml_test FROM STDIN WITH CSV; -- fail at COMMIT (by failing to PREPARE) SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -444,11 +444,11 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- shouldn't see any changes after failed COMMIT SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -457,9 +457,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at COMMIT (by cancelling on PREPARE) SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -472,8 +472,8 @@ COMMIT; ERROR: canceling statement due to user request --- shouldn't see any changes after cancelled PREPARE SELECT * FROM dml_test ORDER BY id ASC; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -482,9 +482,9 @@ SELECT * FROM dml_test ORDER BY id ASC; -- allow connection to allow DROP SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE dml_test; diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index 07f637102..a5ab9b357 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -10,23 +10,23 @@ SET citus.next_shard_id TO 301000; SET citus.shard_replication_factor TO 1; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE distributed_table(key int, value int); CREATE TABLE reference_table(value int); SELECT create_distributed_table('distributed_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- we'll test failure cases of the following cases: @@ -37,13 +37,13 @@ SELECT create_reference_table('reference_table'); -- (e) 
multi-row INSERT to a reference table -- Failure and cancellation on multi-row INSERT that hits the same shard with the same value SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -52,13 +52,13 @@ DETAIL: server closed the connection unexpectedly -- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6); -- Failure and cancellation on multi-row INSERT that hits the same shard with different values SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,7), (5,8); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -67,105 +67,105 @@ DETAIL: server closed the connection unexpectedly -- INSERT INTO distributed_table VALUES (1,9), (5,10); -- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,11), (6,12); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,13), (6,14); ERROR: canceling statement due to user request -- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker, happening on the second query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,15), (6,16); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,17), (6,18); ERROR: canceling statement due to user request -- Failure and cancellation multi-row INSERT that hits multiple shards in multiple workers SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (2,19),(1,20); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (2,21), (1,22); ERROR: canceling statement due to user request -- one test for the reference tables for completeness SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO reference_table VALUES (1), (2), (3), (4); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2); ERROR: canceling statement due to user request -- cancel the second insert over the same connection SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2); ERROR: canceling statement due to user request -- we've either failed or cancelled all queries, so should be empty SELECT * FROM distributed_table; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_table; - value -------- + value +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) RESET SEARCH_PATH; diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out index ff160802e..cebd7f8c6 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -9,30 +9,30 @@ SET citus.shard_replication_factor TO 1; -- do not cache any connections SET citus.max_cached_conns_per_worker TO 0; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE t1(a int PRIMARY KEY, b int, c int); CREATE TABLE r1(a int, b int PRIMARY KEY); CREATE TABLE t2(a int REFERENCES t1(a) ON DELETE CASCADE, b int 
REFERENCES r1(b) ON DELETE CASCADE, c int); SELECT create_distributed_table('t1', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('r1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('t2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- insert some data @@ -41,14 +41,14 @@ INSERT INTO t1 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3); INSERT INTO t2 VALUES (1, 1, 1), (1, 2, 1), (2, 1, 2), (2, 2, 4), (3, 1, 3), (3, 2, 3), (3, 3, 3); SELECT pg_backend_pid() as pid \gset SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) SHOW citus.multi_shard_commit_protocol ; - citus.multi_shard_commit_protocol ------------------------------------ + citus.multi_shard_commit_protocol +--------------------------------------------------------------------- 2pc (1 row) @@ -56,48 +56,48 @@ SHOW citus.multi_shard_commit_protocol ; -- delete using a filter on non-partition column filter -- test both kill and cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard delete @@ -105,24 +105,24 @@ DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) @@ -132,54 +132,54 @@ SELECT count(*) FROM t2; -- delete using a filter on non-partition column filter -- test both kill and cancellation SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard update @@ -187,24 +187,24 @@ UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) @@ -214,48 +214,48 @@ SET citus.multi_shard_commit_protocol TO '1PC'; -- delete using a filter on non-partition column filter -- test both kill and cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard delete @@ -263,24 +263,24 @@ DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) @@ -290,54 +290,54 @@ SELECT count(*) FROM t2; -- delete using a filter on non-partition column filter -- test both kill and cancellation SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- issue a multi shard update @@ -345,24 +345,24 @@ UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 -----+---- + b2 | c4 +--------------------------------------------------------------------- 3 | 1 (1 row) @@ -377,72 +377,72 @@ RESET citus.multi_shard_commit_protocol; -- it is safe to remove them without reducing any -- test coverage SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- check counts before delete SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 ----- + b2 +--------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM r1 WHERE a = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 ----- + b2 +--------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 ----- + b2 +--------------------------------------------------------------------- 3 (1 row) -- test update with subquery pull SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c ----+---+--- + a | b | c +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -453,9 +453,9 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; (7 rows) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t3 SET c = q.c FROM ( @@ -465,17 +465,17 @@ RETURNING *; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c ----+---+--- + a | b | c +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -487,29 +487,29 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; -- kill update part SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE multi_shard.t3_201009").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c ----+---+--- + a | b | c +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -524,165 +524,165 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; -- use a different set of table SET citus.shard_replication_factor to 2; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE t3; CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) -- prevent update of one replica of one shard SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) -- fail only one update verify transaction is rolled back correctly BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 0 | 6 (1 row) -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) -- switch to 1PC SET citus.multi_shard_commit_protocol TO '1PC'; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) -- fail only one update verify transaction is rolled back correctly BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 0 | 6 (1 row) -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 -----+---- + b1 | b2 +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) RESET SEARCH_PATH; diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index c47fbe278..a4aeb7704 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -9,149 +9,149 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE t1 (id int PRIMARY KEY); SELECT create_distributed_table('t1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x); -- Initial metadata status SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) -- Failure to set groupid in the worker SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); - mitmproxy ------------ - + mitmproxy 
+--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to drop all tables in pg_dist_partition SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to truncate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to populate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) -- Verify we can sync metadata after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- t (1 row) -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_placement").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('t2', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('t2', 'id'); @@ -160,8 +160,8 @@ ERROR: canceling statement due to user request SELECT count(*) > 0 AS is_table_distributed FROM pg_dist_partition WHERE logicalrelid='t2'::regclass; - is_table_distributed ----------------------- + is_table_distributed +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 5727cb90c..a397f8dfd 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -1,99 +1,99 @@ SET citus.next_shard_id TO 100500; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref_table (key int, value int); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \copy ref_table FROM stdin delimiter ','; SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SELECT COUNT(*) FROM ref_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) -- verify behavior of single INSERT; should fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO 
ref_table VALUES (5, 6); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=5; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify behavior of UPDATE ... RETURNING; should not execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE ref_table SET key=7 RETURNING value; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=7; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify fix to #2214; should raise error and fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; DELETE FROM ref_table WHERE key=5; UPDATE ref_table SET key=value; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- all shards should still be healthy SELECT COUNT(*) FROM pg_dist_shard_placement WHERE shardstate = 3; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE ref_table; diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index 02163c4a3..a2a113e9a 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -4,9 +4,9 @@ -- the placement commands fail. Otherwise, we might mark the placement -- as invalid and continue with a WARNING. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 2; @@ -18,9 +18,9 @@ CREATE TABLE artists ( name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- add some data @@ -30,40 +30,40 @@ INSERT INTO artists VALUES (3, 'Claude Monet'); INSERT INTO artists VALUES (4, 'William Kurelek'); -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx DELETE FROM artists WHERE id=4; ERROR: current transaction is aborted, commands ignored until end of transaction block RELEASE SAVEPOINT s1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM artists WHERE id IN (4, 5); - id | name -----+----------------- + id | name +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -73,29 +73,29 @@ DELETE FROM artists WHERE id=4; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: savepoint "savepoint_2" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ROLLBACK; SELECT * FROM artists WHERE id IN (4, 5); - id | name -----+----------------- + id | name +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at ROLLBACK SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -104,22 +104,22 @@ SAVEPOINT s1; DELETE FROM 
artists WHERE id=4; ROLLBACK TO SAVEPOINT s1; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; -ERROR: could not make changes to shard 100950 on any node +ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); - id | name -----+----------------- + id | name +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -131,27 +131,27 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; SELECT * FROM artists WHERE id IN (4, 5); - id | name -----+----------------- + id | name +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second ROLLBACK SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -162,21 +162,21 @@ SAVEPOINT s2; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s2; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; -ERROR: could not make changes to shard 100950 on any node +ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); - id | name -----+----------------- + id | name +--------------------------------------------------------------------- 4 | William Kurelek (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- Release after rollback @@ -191,14 +191,14 @@ ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- Recover from errors @@ -213,14 +213,14 @@ ROLLBACK TO SAVEPOINT s1; WARNING: connection not open WARNING: connection not open WARNING: connection not open -WARNING: connection 
error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open COMMIT; -ERROR: could not make changes to shard 100950 on any node +ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id=6; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- replication factor > 1 @@ -232,23 +232,23 @@ CREATE TABLE researchers ( SET citus.shard_count = 1; SET citus.shard_replication_factor = 2; -- single shard, on both workers SELECT create_distributed_table('researchers', 'lab_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; WARNING: connection not open -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open ERROR: connection not open @@ -261,24 +261,24 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name -----+--------+------ + id | lab_id | name +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at rollback SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -290,27 +290,27 @@ WARNING: connection not open WARNING: connection not open RELEASE SAVEPOINT s1; COMMIT; -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name -----+--------+------ + id | lab_id | name +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at release SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -321,7 +321,7 @@ ROLLBACK TO s1; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open 
WARNING: savepoint "savepoint_3" does not exist @@ -329,24 +329,24 @@ ERROR: connection not open COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name -----+--------+------ + id | lab_id | name +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- clean up SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE artists; diff --git a/src/test/regress/expected/failure_setup.out b/src/test/regress/expected/failure_setup.out index 195a738b1..4cbb4b0a4 100644 --- a/src/test/regress/expected/failure_setup.out +++ b/src/test/regress/expected/failure_setup.out @@ -1,19 +1,19 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- add the workers SELECT master_add_node('localhost', :worker_1_port); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 1 (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index da2cb8b40..65c5ffbef 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -1,39 +1,39 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 2; SET citus.shard_replication_factor = 2; CREATE TABLE mod_test (key int, value text); SELECT create_distributed_table('mod_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify behavior of single INSERT; should mark shard as failed SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO mod_test VALUES (2, 6); -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT COUNT(*) FROM mod_test WHERE key=2; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -42,39 +42,39 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- 125 (1 row) TRUNCATE mod_test; -- verify behavior of UPDATE ... RETURNING; should mark as failed SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) INSERT INTO mod_test VALUES (2, 6); SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key ------ + key +--------------------------------------------------------------------- 2 (1 row) SELECT COUNT(*) FROM mod_test WHERE value='ok'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -83,8 +83,8 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- 125 (1 row) @@ -92,9 +92,9 @@ TRUNCATE mod_test; -- verify behavior of multi-statement modifications to a single shard -- should succeed but mark a placement as failed SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -102,14 +102,14 @@ INSERT INTO mod_test VALUES (2, 6); INSERT INTO mod_test VALUES (2, 7); DELETE FROM mod_test WHERE key=2 AND value = '7'; UPDATE mod_test SET value='ok' WHERE key=2; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -118,8 +118,8 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid -------------- + placementid +--------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 2f07f7f5c..d5087779f 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -1,80 +1,80 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 2; SET citus.shard_replication_factor = 2; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- put data in shard for which mitm node is first placement INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value ------+----------- + key | value +--------------------------------------------------------------------- 3 | test data (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value ------+----------- + key | value +--------------------------------------------------------------------- 3 | test data (1 row) -- kill after first SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
- key | value ------+----------- + key | value +--------------------------------------------------------------------- 3 | test data 3 | more data (2 rows) INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value ------+---------------- + key | value +--------------------------------------------------------------------- 3 | test data 3 | more data 3 | even more data @@ -91,9 +91,9 @@ TRUNCATE select_test; -- put data in shard for which mitm node is first placement INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM select_test WHERE key = 3; @@ -102,9 +102,9 @@ SELECT * FROM select_test WHERE key = 3; ERROR: canceling statement due to user request -- cancel after first SELECT; txn should fail and nothing should be marked as invalid SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -117,8 +117,8 @@ SELECT DISTINCT shardstate FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass ); - shardstate ------------- + shardstate +--------------------------------------------------------------------- 1 (1 row) @@ -126,16 +126,16 @@ TRUNCATE select_test; -- cancel the second query -- error after second SELECT; txn should fail SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; - key | value ------+----------- + key | value +--------------------------------------------------------------------- 3 | more data (1 row) @@ -145,41 +145,41 @@ ERROR: canceling statement due to user request COMMIT; -- error after second SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; - key | value ------+----------- + key | value +--------------------------------------------------------------------- 3 | more data (1 row) INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
- key | value ------+---------------- + key | value +--------------------------------------------------------------------- 3 | more data 3 | even more data (2 rows) COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -187,7 +187,7 @@ SELECT recover_prepared_transactions(); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- bug from https://github.com/citusdata/citus/issues/1926 SET citus.max_cached_conns_per_worker TO 0; -- purge cache DROP TABLE select_test; @@ -195,40 +195,40 @@ SET citus.shard_count = 2; SET citus.shard_replication_factor = 1; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.max_cached_conns_per_worker TO 1; -- allow connection to be cached INSERT INTO select_test VALUES (1, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM select_test WHERE key = 1; - key | value ------+----------- + key | value +--------------------------------------------------------------------- 1 | test data (1 row) SELECT * FROM select_test WHERE key = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM select_test WHERE key = 1; - key | value ------+----------- + key | value +--------------------------------------------------------------------- 1 | test data (1 row) diff --git a/src/test/regress/expected/failure_test_helpers.out b/src/test/regress/expected/failure_test_helpers.out index 5362412a1..a66749dff 100644 --- a/src/test/regress/expected/failure_test_helpers.out +++ b/src/test/regress/expected/failure_test_helpers.out @@ -5,8 +5,8 @@ ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1; ALTER SYSTEM SET citus.recover_2pc_interval TO -1; ALTER SYSTEM set citus.enable_statistics_collection TO false; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 4c86ff171..28cbe1b46 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -1,6 +1,6 @@ --- --- Test TRUNCATE command failures --- +-- +-- Test TRUNCATE command failures +-- CREATE SCHEMA truncate_failure; SET search_path TO 'truncate_failure'; SET citus.next_shard_id TO 120000; @@ -11,9 +11,9 @@ SET citus.max_cached_conns_per_worker TO 0; -- use a predictable number of connections per task SET citus.force_max_query_parallelization TO on; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we'll start with replication factor 1, 1PC and parallel mode @@ -22,219 +22,219 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='truncate_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the 
request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends COMMIT -- One shard should not get truncated but the other should --- since it is sent from another connection. +-- since it is sent from another connection. -- Thus, we should see a partially successful truncate -- Note: This is the result of using 1pc and there is no way to recover from it SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) @@ -242,30 +242,30 @@ SELECT count(*) FROM test_table; TRUNCATE test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- cancel as soon as the coordinator sends COMMIT --- interrupts are held during COMMIT/ROLLBACK, so the command +-- interrupts are held during COMMIT/ROLLBACK, so the command -- should have been applied without any issues since cancel is ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -273,74 +273,74 @@ SELECT count(*) FROM test_table; TRUNCATE test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); SET client_min_messages TO WARNING; --- now kill just after the worker sends 
response to +-- now kill just after the worker sends response to -- COMMIT command, so we'll have lots of warnings but the command -- should have been committed both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: could not commit transaction for shard 120002 on any active node -WARNING: could not commit transaction for shard 120000 on any active node +CONTEXT: while executing command on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SET client_min_messages TO ERROR; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); --- now cancel just after the worker sends response to +-- now cancel just after the worker sends response to -- but Postgres doesn't accept interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -349,149 +349,149 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); CREATE TABLE reference_table(i int UNIQUE); INSERT INTO reference_table SELECT x FROM generate_series(1,20) as f(x); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table ADD CONSTRAINT foreign_key FOREIGN KEY (value) REFERENCES reference_table(i); -- immediately kill 
when we see prepare transaction to see if the command -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- immediately cancel when we see prepare transaction to see if the command -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- immediately kill when we see cascading TRUNCATE on the hash table to see +-- immediately kill when we see cascading TRUNCATE on the hash table to see -- rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- immediately cancel when we see cascading TRUNCATE on the hash table to see +-- immediately cancel when we see cascading TRUNCATE on the hash table to see -- if the command still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) @@ -499,35 +499,35 @@ SELECT count(*) FROM reference_table; -- to see if the command still cascaded to referencing table or -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) @@ -535,339 +535,339 @@ SELECT count(*) FROM test_table; -- to see if the command still cascaded to referencing table or -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT 
recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- now, lets test with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- we should be able to revocer the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- we should be able to revocer the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test_table; - count -------- + count 
+--------------------------------------------------------------------- 20 (1 row) -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) --- Since we kill connections to one worker after commit arrives but the +-- Since we kill connections to one worker after commit arrives but the -- other worker connections are healthy, we cannot commit on 1 worker -- which has 2 active shard placements, but the other does. That's why -- we expect to see 2 recovered prepared transactions. 
SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -876,53 +876,53 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) @@ -934,297 +934,297 @@ SET citus.shard_replication_factor = 2; DROP TABLE test_table CASCADE; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='truncate_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the 
request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- we should be able to revocer the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) INSERT INTO test_table 
SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- Since we kill connections to one worker after commit arrives but the +-- Since we kill connections to one worker after commit arrives but the -- other worker connections are healthy, we cannot commit on 1 worker -- which has 4 active shard placements (2 shards, replication factor=2), -- but the other does. That's why we expect to see 4 recovered prepared -- transactions. SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1233,65 +1233,65 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SELECT * FROM unhealthy_shard_count; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 20 (1 row) diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index 430d21b6d..66801ec22 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ 
b/src/test/regress/expected/failure_vacuum.out @@ -3,9 +3,9 @@ -- get WARNINGs instead of ERRORs. SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 1; @@ -13,52 +13,52 @@ SET citus.shard_replication_factor = 2; -- one shard per worker SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; -- ANALYZE transactions being critical is an open question, see #2430 -- show that we marked as INVALID on COMMIT FAILURE -SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND +SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); - shardid | shardstate -----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -68,66 +68,66 @@ WHERE shardid IN ( ); -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE other_vacuum_test (key int, value int); SELECT create_distributed_table('other_vacuum_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test, other_vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: canceling statement due to user request -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE vacuum_test, other_vacuum_test; diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out index 52cb95d32..c13096f6d 100644 --- a/src/test/regress/expected/failure_vacuum_1.out +++ b/src/test/regress/expected/failure_vacuum_1.out @@ -3,9 +3,9 @@ -- get WARNINGs instead of ERRORs. 
SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 1; @@ -13,52 +13,52 @@ SET citus.shard_replication_factor = 2; -- one shard per worker SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic ------------------------ - + clear_network_traffic +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; -- ANALYZE transactions being critical is an open question, see #2430 -- show that we marked as INVALID on COMMIT FAILURE -SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND +SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); - shardid | shardstate -----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -68,67 +68,63 @@ WHERE shardid IN ( ); -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) CREATE TABLE other_vacuum_test (key int, value int); 
SELECT create_distributed_table('other_vacuum_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: syntax error at or near "," -LINE 1: VACUUM vacuum_test, other_vacuum_test; - ^ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: syntax error at or near "," -LINE 1: VACUUM vacuum_test, other_vacuum_test; - ^ -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ------------ - + mitmproxy +--------------------------------------------------------------------- + (1 row) DROP TABLE vacuum_test, other_vacuum_test; diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out index 80546438e..c83c9dca0 100644 --- a/src/test/regress/expected/fast_path_router_modify.out +++ b/src/test/regress/expected/fast_path_router_modify.out @@ -9,24 +9,24 @@ SET citus.enable_fast_path_router_planner TO true; SET citus.shard_replication_factor TO 1; CREATE TABLE modify_fast_path(key int, value_1 int, value_2 text); SELECT create_distributed_table('modify_fast_path', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 2; CREATE TABLE modify_fast_path_replication_2(key int, value_1 int, value_2 text); SELECT create_distributed_table('modify_fast_path_replication_2', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE modify_fast_path_reference(key int, value_1 int, value_2 text); SELECT create_reference_table('modify_fast_path_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- show the output @@ -115,17 +115,17 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - key | value_1 | value_2 ------+---------+--------- - 1 | 1 | + key | value_1 | value_2 +--------------------------------------------------------------------- + 1 | 1 | (1 row) INSERT INTO modify_fast_path (key, value_1) VALUES (2,1) RETURNING value_1, key; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - value_1 | key ----------+----- + value_1 | key +--------------------------------------------------------------------- 1 | 2 (1 row) @@ -134,8 +134,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - ?column? | ?column? -----------+---------- + ?column? | ?column? 
+--------------------------------------------------------------------- 15 | 16 (1 row) @@ -146,18 +146,18 @@ ERROR: non-IMMUTABLE functions are not allowed in the RETURNING clause -- modifying ctes are not supported via fast-path WITH t1 AS (DELETE FROM modify_fast_path WHERE key = 1), t2 AS (SELECT * FROM modify_fast_path) SELECT * FROM t2; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 22_1 for CTE t1: DELETE FROM fast_path_router_modify.modify_fast_path WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM fast_path_router_modify.modify_fast_path WHERE (key OPERATOR(pg_catalog.=) 1) DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 -DEBUG: generating subplan 22_2 for CTE t2: SELECT key, value_1, value_2 FROM fast_path_router_modify.modify_fast_path +DEBUG: generating subplan XXX_2 for CTE t2: SELECT key, value_1, value_2 FROM fast_path_router_modify.modify_fast_path DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT key, value_1, value_2 FROM (SELECT intermediate_result.key, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('22_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value_1 integer, value_2 text)) t2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value_1, value_2 FROM (SELECT intermediate_result.key, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value_1 integer, value_2 text)) t2 DEBUG: Creating router plan DEBUG: Plan is router executable - key | value_1 | value_2 ------+---------+--------- + key | value_1 | value_2 +--------------------------------------------------------------------- (0 rows) -- for update/share is supported via fast-path when replication factor = 1 or reference table @@ -166,8 +166,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - key | value_1 | value_2 ------+---------+--------- + key | value_1 | value_2 +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path WHERE key = 1 FOR SHARE; @@ -175,24 +175,24 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - key | value_1 | value_2 ------+---------+--------- + key | value_1 | value_2 +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR UPDATE; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - key | value_1 | value_2 ------+---------+--------- + key | value_1 | value_2 +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR SHARE; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - key | value_1 | value_2 ------+---------+--------- + key | value_1 | value_2 +--------------------------------------------------------------------- (0 rows) -- for 
update/share is not supported via fast-path wen replication factor > 1 @@ -281,9 +281,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(2,2); @@ -297,9 +297,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 2 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(3,3); @@ -313,9 +313,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 3 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(4,4); @@ -329,9 +329,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 4 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(5,5); @@ -345,9 +345,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 5 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(6,6); @@ -364,9 +364,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) SELECT modify_fast_path_plpsql(6,6); @@ -380,9 +380,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql -------------------------- - + modify_fast_path_plpsql +--------------------------------------------------------------------- + (1 row) RESET client_min_messages; diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out index 5efcbd9dd..0fcade1c5 100644 --- a/src/test/regress/expected/foreign_key_restriction_enforcement.out +++ 
b/src/test/regress/expected/foreign_key_restriction_enforcement.out @@ -10,30 +10,30 @@ SET citus.next_placement_id TO 2380000; SET citus.shard_replication_factor TO 1; CREATE TABLE transitive_reference_table(id int PRIMARY KEY); SELECT create_reference_table('transitive_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE unrelated_dist_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('unrelated_dist_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES reference_table(id) ON UPDATE CASCADE; @@ -47,28 +47,28 @@ SET client_min_messages TO DEBUG1; -- case 1.1: SELECT to a reference table is followed by a parallel SELECT to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -76,64 +76,64 @@ ROLLBACK; -- case 1.2: SELECT to a reference table is followed by a multiple router SELECTs to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; 
- count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -141,8 +141,8 @@ ROLLBACK; -- case 1.3: SELECT to a reference table is followed by a multi-shard UPDATE to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -150,8 +150,8 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -160,8 +160,8 @@ ROLLBACK; -- case 1.4: SELECT to a reference table is followed by a multiple sing-shard UPDATE to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -172,8 +172,8 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -185,8 +185,8 @@ ROLLBACK; -- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -197,8 +197,8 @@ DEBUG: validating foreign key constraint "fkey" ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -210,8 +210,8 @@ ROLLBACK; -- case 1.6: SELECT to a reference table is followed by an unrelated DDL BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -221,8 +221,8 @@ DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SE ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -234,8 +234,8 @@ ROLLBACK; -- the foreign key column BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -245,8 +245,8 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -258,14 +258,14 @@ ROLLBACK; -- the foreign key column after a parallel query has been executed BEGIN; SELECT count(*) FROM unrelated_dist_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -276,14 +276,14 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) 
FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -296,14 +296,14 @@ ROLLBACK; -- the foreign key column, and a parallel query has already been executed BEGIN; SELECT count(*) FROM unrelated_dist_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -314,14 +314,14 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -333,8 +333,8 @@ ROLLBACK; -- case 1.8: SELECT to a reference table is followed by a COPY BEGIN; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -342,8 +342,8 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -355,14 +355,14 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -372,14 +372,14 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -390,26 +390,26 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -419,26 +419,26 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -471,9 +471,9 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 1; -ERROR: insert or update on table "on_update_fkey_table_2380002" violates foreign key constraint "fkey_2380002" +ERROR: insert or update on table "on_update_fkey_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx" DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 3; @@ -525,7 +525,7 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode COPY on_update_fkey_table FROM STDIN WITH CSV; -ERROR: insert or update on table "on_update_fkey_table_2380004" violates foreign key constraint "fkey_2380004" +ERROR: insert or update on table "on_update_fkey_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx" DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001". ROLLBACK; -- case 2.8: UPDATE to a reference table is followed by TRUNCATE @@ -549,8 +549,8 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -560,8 +560,8 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -570,8 +570,8 @@ ROLLBACK; BEGIN; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -579,8 +579,8 @@ ROLLBACK; BEGIN; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -700,34 +700,34 @@ DEBUG: validating foreign key constraint "fkey" TRUNCATE on_update_fkey_table; DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially ROLLBACK; ------ +--------------------------------------------------------------------- --- Now, start testing the other way araound ------ +--------------------------------------------------------------------- -- case 4.1: SELECT to a dist table is follwed by a SELECT to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -735,8 +735,8 @@ ROLLBACK; -- case 4.2: SELECT to a dist table is follwed by a DML to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 
99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -747,8 +747,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -760,8 +760,8 @@ ROLLBACK; -- case 4.3: SELECT to a dist table is follwed by an unrelated DDL to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -771,8 +771,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -783,8 +783,8 @@ ROLLBACK; -- case 4.4: SELECT to a dist table is follwed by a DDL to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -797,8 +797,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -814,8 +814,8 @@ ROLLBACK; SET client_min_messages to LOG; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -825,8 +825,8 @@ ERROR: cannot execute DDL on reference relation "reference_table" because there ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -838,8 +838,8 @@ ROLLBACK; -- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE id = 9; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -848,8 +848,8 @@ NOTICE: truncate cascades to table "on_update_fkey_table" ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE id = 9; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -863,8 +863,8 @@ RESET client_min_messages; BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -872,8 +872,8 @@ ROLLBACK; BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM transitive_reference_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -1033,16 +1033,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT 
create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- make sure that the output isn't too verbose @@ -1054,16 +1054,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); @@ -1083,23 +1083,23 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- make sure that the output isn't too verbose @@ -1112,16 +1112,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1139,16 +1139,16 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1162,16 +1162,16 @@ COMMIT; BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT 
create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1189,16 +1189,16 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1211,8 +1211,8 @@ ROLLBACK; -- setting the mode to sequential should fail BEGIN; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) @@ -1243,9 +1243,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1267,9 +1267,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... 
- create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1287,29 +1287,29 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- and maybe some other test CREATE INDEX i1 ON test_table_1(id); ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0); SELECT count(*) FROM test_table_2; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table_1; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1327,18 +1327,18 @@ CREATE TABLE reference_table(id int PRIMARY KEY); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table" DEBUG: building index "reference_table_pkey" on table "reference_table" serially SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table" DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially SELECT create_distributed_table('distributed_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE @@ -1359,12 +1359,12 @@ DEBUG: Collecting INSERT ... 
SELECT results on coordinator -- see https://github.com/citusdata/citus_docs/issues/664 for the discussion WITH t1 AS (DELETE FROM reference_table RETURNING id) DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *; -DEBUG: generating subplan 170_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id -DEBUG: Plan 170 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('170_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode - id | value_1 | id -----+---------+---- + id | value_1 | id +--------------------------------------------------------------------- (0 rows) -- load some more data for one more test with real-time selects @@ -1380,12 +1380,12 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator -- see https://github.com/citusdata/citus_docs/issues/664 for the discussion WITH t1 AS (DELETE FROM reference_table RETURNING id) SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id; -DEBUG: generating subplan 174_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id -DEBUG: Plan 174 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('174_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1394,17 +1394,17 @@ DETAIL: Reference relation "reference_table" is modified, which might lead to d WITH t1 AS (DELETE FROM distributed_table RETURNING id), t2 AS (DELETE FROM reference_table RETURNING id) SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id; -DEBUG: generating subplan 176_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id -DEBUG: generating subplan 176_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id -DEBUG: Plan 176 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('176_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('176_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id +DEBUG: generating subplan XXX_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" -- similarly this should fail since we first access to a distributed -- table via t1, and then access to the reference table in the main query WITH t1 AS (DELETE FROM distributed_table RETURNING id) DELETE FROM reference_table RETURNING id; -DEBUG: generating subplan 179_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id -DEBUG: Plan 179 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" -- finally, make sure that we can execute the same queries @@ -1414,11 +1414,11 @@ BEGIN; WITH t1 AS (DELETE FROM distributed_table RETURNING id), t2 AS (DELETE FROM reference_table RETURNING id) SELECT count(*) FROM 
distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id; -DEBUG: generating subplan 181_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id -DEBUG: generating subplan 181_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id -DEBUG: Plan 181 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('181_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('181_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) - count -------- +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id +DEBUG: generating subplan XXX_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) + count +--------------------------------------------------------------------- 0 (1 row) @@ -1427,10 +1427,10 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; WITH t1 AS (DELETE FROM distributed_table RETURNING id) DELETE FROM reference_table RETURNING id; -DEBUG: generating subplan 184_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id -DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id - id ----- +DEBUG: generating subplan XXX_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id + id +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out index dd59151c2..9f30cd236 100644 --- a/src/test/regress/expected/foreign_key_to_reference_table.out +++ b/src/test/regress/expected/foreign_key_to_reference_table.out @@ -28,18 +28,18 @@ SELECT d $$ )).RESULT::json )::json )).* ; CREATE TABLE referenced_table(id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- we still do not support update/delete operations through foreign constraints if the foreign key includes the distribution column -- All should fail CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE 
referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL; @@ -53,9 +53,9 @@ DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation when di DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; @@ -69,9 +69,9 @@ DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation when di DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL; @@ -88,16 +88,16 @@ ROLLBACK; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id, test_column)); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column) ON UPDATE SET DEFAULT; @@ -111,9 +111,9 @@ DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operatio DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column) ON UPDATE CASCADE; @@ -130,22 +130,22 @@ ROLLBACK; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- 
+ name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000043 | fkey_reference_table.referencing_table_7000043 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000044 | fkey_reference_table.referencing_table_7000044 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000045 | fkey_reference_table.referencing_table_7000045 | fkey_reference_table.referenced_table_7000042 @@ -159,14 +159,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- referencing_table_id_fkey_7000051 | fkey_reference_table.referencing_table_7000051 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000052 | fkey_reference_table.referencing_table_7000052 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000053 | fkey_reference_table.referencing_table_7000053 | fkey_reference_table.referenced_table_7000042 @@ -180,15 +180,15 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000059 | fkey_reference_table.referencing_table_7000059 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000060 | fkey_reference_table.referencing_table_7000060 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000061 | fkey_reference_table.referencing_table_7000061 | fkey_reference_table.referenced_table_7000042 @@ -203,15 +203,15 @@ DROP TABLE referencing_table; BEGIN; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | 
relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- referencing_table_id_fkey_7000067 | fkey_reference_table.referencing_table_7000067 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000068 | fkey_reference_table.referencing_table_7000068 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000069 | fkey_reference_table.referencing_table_7000069 | fkey_reference_table.referenced_table_7000042 @@ -225,15 +225,15 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000075 | fkey_reference_table.referencing_table_7000075 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000076 | fkey_reference_table.referencing_table_7000076 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000077 | fkey_reference_table.referencing_table_7000077 | fkey_reference_table.referenced_table_7000042 @@ -247,15 +247,15 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000083 | fkey_reference_table.referencing_table_7000083 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000084 | fkey_reference_table.referencing_table_7000084 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000085 | fkey_reference_table.referencing_table_7000085 | fkey_reference_table.referenced_table_7000042 @@ -269,15 +269,15 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000091 | fkey_reference_table.referencing_table_7000091 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000092 | fkey_reference_table.referencing_table_7000092 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000093 | fkey_reference_table.referencing_table_7000093 | fkey_reference_table.referenced_table_7000042 @@ -292,9 +292,9 @@ DROP TABLE referencing_table; -- check if we can add the foreign key while adding the column CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD COLUMN referencing int REFERENCES referenced_table(id) ON UPDATE CASCADE; @@ -302,8 +302,8 @@ ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names HINT: You can issue each command separately such as ALTER TABLE referencing_table ADD COLUMN referencing data_type; ALTER TABLE referencing_table ADD CONSTRAINT constraint_name FOREIGN KEY (referencing) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------+-------+------------ + name | relid | refd_relid +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -311,9 +311,9 @@ DROP TABLE referencing_table; SET citus.shard_replication_factor TO 2; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); @@ -321,17 +321,17 @@ ERROR: cannot create foreign key constraint DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1". HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. 
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------+-------+------------ + name | relid | refd_relid +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; -- should fail when we add the column as well CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD COLUMN referencing_col int REFERENCES referenced_table(id) ON DELETE SET NULL; @@ -339,8 +339,8 @@ ERROR: cannot create foreign key constraint DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1". HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid -------+-------+------------ + name | relid | refd_relid +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -348,14 +348,14 @@ SET citus.shard_replication_factor TO 1; -- simple create_distributed_table should work in/out transactions on tables with foreign key to reference tables CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- referencing_table_id_fkey_7000123 | fkey_reference_table.referencing_table_7000123 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000124 | fkey_reference_table.referencing_table_7000124 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000125 | fkey_reference_table.referencing_table_7000125 | fkey_reference_table.referenced_table_7000042 @@ -371,22 +371,22 @@ DROP TABLE referenced_table; BEGIN; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND 
refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- + name | relid | refd_relid +--------------------------------------------------------------------- referencing_table_id_fkey_7000132 | fkey_reference_table.referencing_table_7000132 | fkey_reference_table.referenced_table_7000131 referencing_table_id_fkey_7000133 | fkey_reference_table.referencing_table_7000133 | fkey_reference_table.referenced_table_7000131 referencing_table_id_fkey_7000134 | fkey_reference_table.referencing_table_7000134 | fkey_reference_table.referenced_table_7000131 @@ -402,33 +402,33 @@ DROP TABLE referencing_table; -- distribution column or from distributed tables to reference tables. CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; - name | relid | refd_relid -------+-------+------------ + name | relid | refd_relid +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; - name | relid | refd_relid -------+-------+------------ + name | relid | refd_relid +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -437,41 +437,41 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(id); -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO 
referencing_table VALUES(1, 1); -ERROR: insert or update on table "referencing_table_7000141" violates foreign key constraint "fkey_ref_7000141" -DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_7000140". -CONTEXT: while executing command on localhost:57637 +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table SELECT x, x from generate_series(1,1000) as f(x); INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id > 3; -ERROR: update or delete on table "referenced_table_7000140" violates foreign key constraint "fkey_ref_7000143" on table "referencing_table_7000143" -DETAIL: Key (id)=(4) is still referenced from table "referencing_table_7000143". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referenced_table WHERE id = 501; -- test cascading truncate TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -483,15 +483,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referencing_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- self referencing foreign key @@ -506,15 +506,15 @@ CREATE SCHEMA referencing_schema; CREATE TABLE referenced_schema.referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_schema.referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_schema.referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_schema.referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_schema.referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_schema.referenced_table(id) ON DELETE CASCADE; @@ -522,8 +522,8 @@ INSERT INTO referenced_schema.referenced_table SELECT x, x from generate_series( INSERT INTO referencing_schema.referencing_table SELECT x, x 
from generate_series(1,1000) as f(x); DELETE FROM referenced_schema.referenced_table WHERE id > 800; SELECT count(*) FROM referencing_schema.referencing_table; - count -------- + count +--------------------------------------------------------------------- 800 (1 row) @@ -535,15 +535,15 @@ RESET client_min_messages; CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT 1); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE SET DEFAULT; @@ -551,8 +551,8 @@ INSERT INTO referenced_table SELECT x, x FROM generate_series(1,1000) AS f(x); INSERT INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column > 800; SELECT count(*) FROM referencing_table WHERE ref_id = 1; - count -------- + count +--------------------------------------------------------------------- 201 (1 row) @@ -563,15 +563,15 @@ CREATE TYPE fkey_reference_table.composite AS (key1 int, key2 int); CREATE TABLE referenced_table(test_column composite, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, referencing_composite composite); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (referencing_composite) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -579,8 +579,8 @@ INSERT INTO referenced_table SELECT (x+1, x+1)::composite FROM generate_series(1 INSERT INTO referencing_table SELECT x, (x+1, x+1)::composite FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE (test_column).key1 > 900; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 899 (1 row) @@ -595,15 +595,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -611,8 +611,8 @@ INSERT INTO referenced_table(test_column2) SELECT x FROM generate_series(1,1000) INSERT 
INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column2 > 10; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -627,15 +627,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column int PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -644,8 +644,8 @@ INSERT INTO referenced_table SELECT x,x FROM generate_series(1,1000) AS f(x); INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,1000) AS f(x); -- Fails for non existing value inserts (serial is already incremented) INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,10) AS f(x); -ERROR: insert or update on table "referencing_table_7000190" violates foreign key constraint "fkey_ref_7000190" -DETAIL: Key (ref_id)=(1004) is not present in table "referenced_table_7000187". +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". DROP TABLE referenced_table CASCADE; NOTICE: drop cascades to constraint fkey_ref on table referencing_table DROP TABLE referencing_table CASCADE; @@ -658,15 +658,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -675,8 +675,8 @@ INSERT INTO referenced_table(test_column2) SELECT x FROM generate_series(1,1000) INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,1000) AS f(x); -- Fails for non existing value inserts (serial is already incremented) INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,10) AS f(x); -ERROR: insert or update on table "referencing_table_7000199" violates foreign key constraint "fkey_ref_7000199" -DETAIL: Key (ref_id)=(1004) is not present in table "referenced_table_7000196". +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". 
DROP TABLE referenced_table CASCADE; NOTICE: drop cascades to constraint fkey_ref on table referencing_table DROP TABLE referencing_table CASCADE; @@ -687,15 +687,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE SET DEFAULT; @@ -709,15 +709,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON UPDATE CASCADE; @@ -728,8 +728,8 @@ ON CONFLICT (test_column) DO UPDATE SET test_column = -1 * EXCLUDED.test_column; SELECT * FROM referencing_table WHERE ref_id < 0 ORDER BY 1; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 1 | -1 2 | -2 3 | -3 @@ -746,9 +746,9 @@ INSERT INTO referenced_table VALUES (1,1), (2,2), (3,3); INSERT INTO referencing_table VALUES (1,1), (2,2), (3,3); SELECT create_reference_table('referenced_table'); NOTICE: Copying data from local table... 
- create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); @@ -773,28 +773,28 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000226 | fkey_reference_table.referencing_table_7000226 | fkey_reference_table.referenced_table_7000224 fkey_ref_7000227 | fkey_reference_table.referencing_table_7000227 | fkey_reference_table.referenced_table_7000224 fkey_ref_7000228 | fkey_reference_table.referencing_table_7000228 | fkey_reference_table.referenced_table_7000224 @@ -817,35 +817,35 @@ INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x); -- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,1500) AS f(x); -ERROR: insert or update on table "referencing_table_7000226" violates foreign key constraint "foreign_key_2_7000226" -DETAIL: Key (id)=(1) is not present in table "referenced_table2_7000225". +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "foreign_key_2_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referenced_table2_xxxxxxx". -- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x); -ERROR: insert or update on table "referencing_table_7000226" violates foreign key constraint "foreign_key_2_7000226" -DETAIL: Key (id)=(1) is not present in table "referenced_table2_7000225". +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "foreign_key_2_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referenced_table2_xxxxxxx". 
-- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(1000,1400) AS f(x); -ERROR: insert or update on table "referencing_table_7000228" violates foreign key constraint "fkey_ref_7000228" -DETAIL: Key (id)=(1015) is not present in table "referenced_table_7000224". +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referenced_table_xxxxxxx". -- should succeed INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(600,900) AS f(x); SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 201 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -859,26 +859,26 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 16 (1 row) @@ -899,21 +899,21 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -921,8 +921,8 @@ BEGIN; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 
'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000246 | fkey_reference_table.referencing_table_7000246 | fkey_reference_table.referenced_table_7000244 fkey_ref_7000247 | fkey_reference_table.referencing_table_7000247 | fkey_reference_table.referenced_table_7000244 fkey_ref_7000248 | fkey_reference_table.referencing_table_7000248 | fkey_reference_table.referenced_table_7000244 @@ -945,32 +945,32 @@ INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x); -- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,1500) AS f(x); -ERROR: insert or update on table "referencing_table_7000246" violates foreign key constraint "foreign_key_2_7000246" +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "foreign_key_2_xxxxxxx" -- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x); -ERROR: insert or update on table "referencing_table_7000246" violates foreign key constraint "foreign_key_2_7000246" +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "foreign_key_2_xxxxxxx" -- should fail INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(1000,1400) AS f(x); -ERROR: insert or update on table "referencing_table_7000248" violates foreign key constraint "fkey_ref_7000248" +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" -- should succeed INSERT INTO referencing_table SELECT x, x+501 FROM generate_series(0,1000) AS f(x); SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -985,28 +985,28 @@ CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE); BEGIN; SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; SELECT 
count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 16 (1 row) @@ -1028,21 +1028,21 @@ CREATE TABLE referenced_table(test_column int, test_column2 int UNIQUE, PRIMARY CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int); CREATE TABLE referencing_table2(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -1052,8 +1052,8 @@ ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFE ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref_to_dist FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE; COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid ---------------------------+-------------------------------------------------+------------------------------------------------ + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref_7000265 | fkey_reference_table.referencing_table_7000265 | fkey_reference_table.referenced_table_7000264 fkey_ref_7000266 | fkey_reference_table.referencing_table_7000266 | fkey_reference_table.referenced_table_7000264 fkey_ref_7000267 | fkey_reference_table.referencing_table_7000267 | fkey_reference_table.referenced_table_7000264 @@ -1083,33 +1083,33 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); -- should fail INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,100) AS f(x); -ERROR: insert or update on table "referencing_table2_7000273" violates foreign key constraint "fkey_ref_to_dist_7000273" -DETAIL: Key (id)=(1) is not present in table "referencing_table_7000265". +ERROR: insert or update on table "referencing_table2_xxxxxxx" violates foreign key constraint "fkey_ref_to_dist_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referencing_table_xxxxxxx". -- should succeed INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x); -- should fail INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(200,500) AS f(x); -ERROR: insert or update on table "referencing_table2_7000273" violates foreign key constraint "fkey_ref_to_dist_7000273" -DETAIL: Key (id)=(401) is not present in table "referencing_table_7000265". +ERROR: insert or update on table "referencing_table2_xxxxxxx" violates foreign key constraint "fkey_ref_to_dist_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referencing_table_xxxxxxx". 
-- should succeed INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,300) AS f(x); DELETE FROM referenced_table WHERE test_column < 200; SELECT count(*) FROM referencing_table; - count -------- + count +--------------------------------------------------------------------- 201 (1 row) SELECT count(*) FROM referencing_table2; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) DELETE FROM referencing_table WHERE id > 200; SELECT count(*) FROM referencing_table2; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -1125,29 +1125,29 @@ CREATE TABLE referenced_table(test_column int, test_column2 int UNIQUE, PRIMARY CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE); CREATE TABLE referencing_table2(id int, ref_id int, FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column2) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 24 (1 row) @@ -1167,27 +1167,27 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referencing_table(id int, ref_id int, ref_id2 int, PRIMARY KEY(id, ref_id)); CREATE TABLE referencing_referencing_table(id int, ref_id int, FOREIGN KEY (id, ref_id) REFERENCES referencing_table(id, ref_id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id, ref_id2) REFERENCES referenced_table(test_column, test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.referencing%' ORDER BY 1,2,3; - name | relid | refd_relid -------------------------------------------------------+------------------------------------------------------------+------------------------------------------------ + name | relid 
| refd_relid +--------------------------------------------------------------------- fkey_ref_7000299 | fkey_reference_table.referencing_table_7000299 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000300 | fkey_reference_table.referencing_table_7000300 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000301 | fkey_reference_table.referencing_table_7000301 | fkey_reference_table.referenced_table_7000298 @@ -1196,14 +1196,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.refe fkey_ref_7000304 | fkey_reference_table.referencing_table_7000304 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000305 | fkey_reference_table.referencing_table_7000305 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000306 | fkey_reference_table.referencing_table_7000306 | fkey_reference_table.referenced_table_7000298 - referencing_referencing_table_id_ref_id_fkey_7000307 | fkey_reference_table.referencing_referencing_table_7000307 | fkey_reference_table.referencing_table_7000299 - referencing_referencing_table_id_ref_id_fkey_7000308 | fkey_reference_table.referencing_referencing_table_7000308 | fkey_reference_table.referencing_table_7000300 - referencing_referencing_table_id_ref_id_fkey_7000309 | fkey_reference_table.referencing_referencing_table_7000309 | fkey_reference_table.referencing_table_7000301 - referencing_referencing_table_id_ref_id_fkey_7000310 | fkey_reference_table.referencing_referencing_table_7000310 | fkey_reference_table.referencing_table_7000302 - referencing_referencing_table_id_ref_id_fkey_7000311 | fkey_reference_table.referencing_referencing_table_7000311 | fkey_reference_table.referencing_table_7000303 - referencing_referencing_table_id_ref_id_fkey_7000312 | fkey_reference_table.referencing_referencing_table_7000312 | fkey_reference_table.referencing_table_7000304 - referencing_referencing_table_id_ref_id_fkey_7000313 | fkey_reference_table.referencing_referencing_table_7000313 | fkey_reference_table.referencing_table_7000305 - referencing_referencing_table_id_ref_id_fkey_7000314 | fkey_reference_table.referencing_referencing_table_7000314 | fkey_reference_table.referencing_table_7000306 + referencing_referencing_table_id_fkey_7000307 | fkey_reference_table.referencing_referencing_table_7000307 | fkey_reference_table.referencing_table_7000299 + referencing_referencing_table_id_fkey_7000308 | fkey_reference_table.referencing_referencing_table_7000308 | fkey_reference_table.referencing_table_7000300 + referencing_referencing_table_id_fkey_7000309 | fkey_reference_table.referencing_referencing_table_7000309 | fkey_reference_table.referencing_table_7000301 + referencing_referencing_table_id_fkey_7000310 | fkey_reference_table.referencing_referencing_table_7000310 | fkey_reference_table.referencing_table_7000302 + referencing_referencing_table_id_fkey_7000311 | fkey_reference_table.referencing_referencing_table_7000311 | fkey_reference_table.referencing_table_7000303 + referencing_referencing_table_id_fkey_7000312 | fkey_reference_table.referencing_referencing_table_7000312 | fkey_reference_table.referencing_table_7000304 + referencing_referencing_table_id_fkey_7000313 | fkey_reference_table.referencing_referencing_table_7000313 | fkey_reference_table.referencing_table_7000305 + referencing_referencing_table_id_fkey_7000314 | fkey_reference_table.referencing_referencing_table_7000314 | fkey_reference_table.referencing_table_7000306 (16 rows) INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(1,1000) AS f(x); @@ 
-1211,15 +1211,15 @@ INSERT INTO referencing_table SELECT x, x+1, x+2 FROM generate_series(1,999) AS INSERT INTO referencing_referencing_table SELECT x, x+1 FROM generate_series(1,999) AS f(x); DELETE FROM referenced_table WHERE test_column > 800; SELECT max(ref_id) FROM referencing_referencing_table; - max ------ + max +--------------------------------------------------------------------- 800 (1 row) DROP TABLE referenced_table CASCADE; NOTICE: drop cascades to constraint fkey_ref on table referencing_table DROP TABLE referencing_table CASCADE; -NOTICE: drop cascades to constraint referencing_referencing_table_id_ref_id_fkey on table referencing_referencing_table +NOTICE: drop cascades to constraint referencing_referencing_table_id_fkey on table referencing_referencing_table DROP TABLE referencing_referencing_table; -- test if create_distributed_table works in transactions with some edge cases -- the following checks if create_distributed_table works on foreign keys when @@ -1227,23 +1227,23 @@ DROP TABLE referencing_referencing_table; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_table_1(id)); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES test_table_2(id)); SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE test_table_1 CASCADE; @@ -1255,16 +1255,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1278,16 +1278,16 @@ COMMIT; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY); SELECT create_reference_table('test_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_1 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_2(id); @@ -1305,9 +1305,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data 
from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1321,15 +1321,15 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX i1 ON test_table_1(id); @@ -1341,27 +1341,27 @@ COMMIT; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1371,15 +1371,15 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT foreign_key FOREIGN KEY(value_1) REFERENCES test_table_1(id); @@ -1390,8 +1390,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1401,22 +1401,22 @@ ERROR: table "test_table_1" does not exist CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table 
+--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1426,23 +1426,23 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1451,21 +1451,21 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 DROP COLUMN value_1; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1475,22 +1475,22 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 DROP COLUMN value_1; COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 
row) @@ -1499,22 +1499,22 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_1 DROP COLUMN id CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1524,23 +1524,23 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_1 DROP COLUMN id CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1549,15 +1549,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1,1), (2,2), (3,3); @@ -1570,10 +1570,10 @@ INSERT INTO test_table_2 VALUES (4,2147483648); -- should fail since there is a bigint out of integer range > (2^32 - 1) ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int; ERROR: integer out of range -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 8 (1 row) @@ -1585,15 +1585,15 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT 
create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE bigint; @@ -1601,8 +1601,8 @@ BEGIN; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1611,15 +1611,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1627,8 +1627,8 @@ INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3); TRUNCATE test_table_1 CASCADE; NOTICE: truncate cascades to table "test_table_2" SELECT * FROM test_table_2; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1636,15 +1636,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1654,8 +1654,8 @@ BEGIN; NOTICE: truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1664,15 +1664,15 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1681,8 +1681,8 @@ BEGIN; NOTICE: 
truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1690,28 +1690,28 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3); TRUNCATE test_table_2 CASCADE; SELECT * FROM test_table_2; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; - id ----- + id +--------------------------------------------------------------------- 1 2 3 @@ -1722,15 +1722,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1739,13 +1739,13 @@ BEGIN; TRUNCATE test_table_2 CASCADE; COMMIT; SELECT * FROM test_table_2; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; - id ----- + id +--------------------------------------------------------------------- 1 2 3 @@ -1758,21 +1758,21 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -1796,15 +1796,15 @@ select create_reference_table('referencing_table'); ERROR: distributing partitioned tables in only supported for hash-distributed tables -- partitioned tables are supported as hash distributed table SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table 
+--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- add foreign constraints in between partitions @@ -1814,22 +1814,22 @@ ALTER TABLE referencing_table_4 ADD CONSTRAINT fkey FOREIGN KEY (id) REFERENCES ALTER TABLE referencing_table_4 ADD CONSTRAINT fkey_to_ref FOREIGN KEY (value_1) REFERENCES referenced_table; -- should fail since the data will flow to partitioning_test_4 and it has a foreign constraint to partitioning_test_0 on id column INSERT INTO referencing_table VALUES (0, 5); -ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_7000540" -DETAIL: Key (id)=(0) is not present in table "referencing_table_0_7000524". -CONTEXT: while executing command on localhost:57638 +ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_xxxxxxx" +DETAIL: Key (id)=(X) is not present in table "referencing_table_0_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- should succeed on partitioning_test_0 INSERT INTO referencing_table VALUES (0, 1); SELECT * FROM referencing_table; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 0 | 1 (1 row) -- should fail since partitioning_test_4 has foreign constraint to referenced_table on value_1 column INSERT INTO referencing_table VALUES (0, 5); ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_to_ref_7000540" -DETAIL: Key (value_1)=(5) is not present in table "referenced_table_7000512". -CONTEXT: while executing command on localhost:57638 +DETAIL: Key (value_1)=(5) is not present in table "referenced_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx INSERT INTO referenced_table VALUES(5,5); -- should succeed since both of the foreign constraints are positive INSERT INTO referencing_table VALUES (0, 5); @@ -1840,8 +1840,8 @@ BEGIN; TRUNCATE referencing_table, referenced_table; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; - id | value_1 | x -----+---------+--- + id | value_1 | x +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -1849,8 +1849,8 @@ BEGIN; TRUNCATE referenced_table, referencing_table; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; - id | value_1 | x -----+---------+--- + id | value_1 | x +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/full_join.out b/src/test/regress/expected/full_join.out index 9cfbdcd82..5c37b2e52 100644 --- a/src/test/regress/expected/full_join.out +++ b/src/test/regress/expected/full_join.out @@ -8,21 +8,21 @@ CREATE TABLE test_table_1(id int, val1 int); CREATE TABLE test_table_2(id bigint, val1 int); CREATE TABLE test_table_3(id int, val1 bigint); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES(1,1),(2,2),(3,3); @@ -30,8 +30,8 @@ INSERT INTO test_table_2 VALUES(2,2),(3,3),(4,4); INSERT INTO test_table_3 VALUES(1,1),(3,3),(4,5); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id ----- + id +--------------------------------------------------------------------- 1 2 3 @@ -40,10 +40,10 @@ SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id | val1 | val1 -----+------+------ + id | val1 | val1 +--------------------------------------------------------------------- 1 | 1 | 1 - 2 | 2 | + 2 | 2 | 3 | 3 | 3 4 | | 5 (4 rows) @@ -55,13 +55,13 @@ SELECT * FROM (SELECT test_table_1.id FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2 USING(id) ORDER BY 1; - id ----- + id +--------------------------------------------------------------------- 1 2 3 - - + + (5 rows) -- Join subqueries using multiple columns @@ -71,19 +71,19 @@ SELECT * FROM (SELECT test_table_1.id, test_table_1.val1 FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2 USING(id, val1) ORDER BY 1; - id | val1 -----+------ + id | val1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 - | - | + | + | (5 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1; - id | val1 -----+------ + id | val1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -97,8 +97,8 @@ WHERE id::bigint < 55 GROUP BY id ORDER BY 2 ASC LIMIT 3; - count | 
avg_value | not_null --------+-----------+---------- + count | avg_value | not_null +--------------------------------------------------------------------- 1 | 2 | t 1 | 6 | t 1 | 12 | t @@ -108,8 +108,8 @@ SELECT max(val1) FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; - max ------ + max +--------------------------------------------------------------------- 1 2 3 @@ -121,8 +121,8 @@ SELECT max(val1) FROM test_table_1 LEFT JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; - max ------ + max +--------------------------------------------------------------------- 1 2 3 @@ -138,36 +138,36 @@ INSERT INTO test_table_2 VALUES(7, NULL); INSERT INTO test_table_3 VALUES(7, NULL); -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id | val1 | val1 -----+------+------ + id | val1 | val1 +--------------------------------------------------------------------- 1 | 1 | 1 - 2 | 2 | + 2 | 2 | 3 | 3 | 3 4 | | 5 - 7 | | + 7 | | (5 rows) -- Get the same result (with multiple id) SELECT * FROM test_table_1 FULL JOIN test_table_3 ON (test_table_1.id = test_table_3.id) ORDER BY 1; - id | val1 | id | val1 -----+------+----+------ + id | val1 | id | val1 +--------------------------------------------------------------------- 1 | 1 | 1 | 1 - 2 | 2 | | + 2 | 2 | | 3 | 3 | 3 | 3 - 7 | | 7 | + 7 | | 7 | | | 4 | 5 (5 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1; - id | val1 -----+------ + id | val1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 4 | 5 - 7 | - 7 | + 7 | + 7 | (6 rows) -- In order to make the same test with different data types use text-varchar pair @@ -178,23 +178,23 @@ DROP TABLE test_table_3; CREATE TABLE test_table_1(id int, val1 text); CREATE TABLE test_table_2(id int, val1 varchar(30)); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES(1,'val_1'),(2,'val_2'),(3,'val_3'), (4, NULL); INSERT INTO test_table_2 VALUES(2,'val_2'),(3,'val_3'),(4,'val_4'), (5, NULL); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; - id ----- + id +--------------------------------------------------------------------- 1 2 3 @@ -204,13 +204,13 @@ SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; - id | val1 | val1 -----+-------+------- - 1 | val_1 | + id | val1 | val1 +--------------------------------------------------------------------- + 1 | val_1 | 2 | val_2 | val_2 3 | val_3 | val_3 4 | | val_4 - 5 | | + 5 | | (5 rows) -- Join subqueries using multiple columns @@ -220,28 +220,28 @@ SELECT * FROM (SELECT test_table_2.id, test_table_2.val1 FROM test_table_1 FULL JOIN test_table_2 using(id)) as j2 USING(id, val1) ORDER BY 1,2; - id | val1 -----+------- + id | val1 +--------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 4 | 
val_4 - 4 | - 5 | - | - | + 4 | + 5 | + | + | (8 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_2 USING(id, val1) ORDER BY 1,2; - id | val1 -----+------- + id | val1 +--------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 4 | val_4 - 4 | - 5 | + 4 | + 5 | (6 rows) DROP SCHEMA full_join CASCADE; diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out index f9850c7d8..c95fec934 100644 --- a/src/test/regress/expected/intermediate_result_pruning.out +++ b/src/test/regress/expected/intermediate_result_pruning.out @@ -6,30 +6,30 @@ SET citus.next_shard_id TO 1480000; SET citus.shard_replication_factor = 1; CREATE TABLE table_1 (key int, value text); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_2 (key int, value text); SELECT create_distributed_table('table_2', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_3 (key int, value text); SELECT create_distributed_table('table_3', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref_table (key int, value text); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- load some data @@ -47,12 +47,12 @@ SELECT count(*) FROM some_values_1 JOIN table_2 USING (key); -DEBUG: generating subplan 5_1 for CTE some_values_1: SELECT key FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Subplan 5_1 will be sent to localhost:57637 -DEBUG: Subplan 5_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -65,11 +65,11 @@ SELECT count(*) FROM some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 1; -DEBUG: generating subplan 7_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM 
((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 7_1 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -82,12 +82,12 @@ SELECT count(*) FROM some_values_1 JOIN ref_table USING (key); -DEBUG: generating subplan 9_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) -DEBUG: Subplan 9_1 will be sent to localhost:57637 -DEBUG: Subplan 9_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -101,13 +101,13 @@ SELECT count(*) FROM some_values_2 JOIN table_2 USING (key) WHERE table_2.key = 1; -DEBUG: generating subplan 11_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 11_2 for CTE some_values_2: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 
USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 11_1 will be sent to localhost:57638 -DEBUG: Subplan 11_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -121,14 +121,14 @@ SELECT count(*) FROM some_values_2 JOIN table_2 USING (key) WHERE table_2.key = 3; -DEBUG: generating subplan 14_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 14_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 14_1 will be sent to localhost:57637 -DEBUG: Subplan 14_1 will be sent to localhost:57638 -DEBUG: Subplan 14_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 
will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 1 (1 row) @@ -143,14 +143,14 @@ SELECT count(*) FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key) WHERE table_2.key = 3; -DEBUG: generating subplan 17_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 17_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 17_1 will be sent to localhost:57638 -DEBUG: Subplan 17_1 will be sent to localhost:57637 -DEBUG: Subplan 17_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 1 (1 row) @@ -165,14 +165,14 @@ SELECT count(*) FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key) WHERE table_2.key = 3; -DEBUG: generating subplan 20_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 20_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM 
((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 20_1 will be sent to localhost:57638 -DEBUG: Subplan 20_1 will be sent to localhost:57637 -DEBUG: Subplan 20_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -188,13 +188,13 @@ SELECT count(*) FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key) WHERE table_2.key = 1; -DEBUG: generating subplan 23_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 23_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_2'::text, 
'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 23_1 will be sent to localhost:57637 -DEBUG: Subplan 23_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -207,15 +207,15 @@ SELECT count(*) FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key) WHERE table_2.key != 3; -DEBUG: generating subplan 26_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 26_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3) -DEBUG: Subplan 26_1 will be sent to localhost:57637 -DEBUG: Subplan 26_1 will be sent to localhost:57638 -DEBUG: Subplan 26_2 will be sent to localhost:57637 -DEBUG: Subplan 26_2 will be sent to localhost:57638 - count -------- +DEBUG: generating 
subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 1 (1 row) @@ -230,15 +230,15 @@ SELECT count(*) FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key) WHERE table_2.key != 3; -DEBUG: generating subplan 29_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 29_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3) -DEBUG: Subplan 29_1 will be sent to localhost:57637 -DEBUG: Subplan 29_1 will be sent to localhost:57638 -DEBUG: Subplan 29_2 will be sent to localhost:57637 -DEBUG: Subplan 29_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random 
double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -251,12 +251,12 @@ SELECT count(*) FROM (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key); -DEBUG: generating subplan 32_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.ref_table WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Subplan 32_1 will be sent to localhost:57637 -DEBUG: Subplan 32_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.ref_table WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -268,8 +268,8 @@ SELECT count(*) FROM (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key) WHERE table_2.key = 1; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -285,13 +285,13 @@ SELECT count(*) FROM some_values_2; -DEBUG: generating subplan 35_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 35_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN 
intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 -DEBUG: Subplan 35_1 will be sent to localhost:57637 -DEBUG: Subplan 35_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -311,17 +311,17 @@ SELECT count(*) FROM top_cte JOIN table_2 USING (key); -DEBUG: generating subplan 38_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2 -DEBUG: generating subplan 39_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 39_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Subplan 38_1 will be sent to localhost:57637 -DEBUG: Subplan 38_1 will be sent to localhost:57638 -DEBUG: Subplan 39_1 will be sent to localhost:57637 -DEBUG: Subplan 39_2 will be 
sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2 +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -341,16 +341,16 @@ SELECT count(*) FROM top_cte JOIN table_2 USING (key) WHERE table_2.key = 2; -DEBUG: generating subplan 42_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2 -DEBUG: generating subplan 43_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 43_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS 
count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2) -DEBUG: Subplan 42_1 will be sent to localhost:57638 -DEBUG: Subplan 43_1 will be sent to localhost:57637 -DEBUG: Subplan 43_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2 +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -363,18 +363,18 @@ WITH some_values_1 AS some_values_3 AS (SELECT key FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key)) SELECT * FROM some_values_3 JOIN ref_table ON (true); -DEBUG: generating subplan 46_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 46_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 46_3 for CTE some_values_3: SELECT some_values_2.key FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING 
(key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) -DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT some_values_3.key, ref_table.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('46_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_3 JOIN intermediate_result_pruning.ref_table ON (true)) -DEBUG: Subplan 46_1 will be sent to localhost:57637 -DEBUG: Subplan 46_1 will be sent to localhost:57638 -DEBUG: Subplan 46_2 will be sent to localhost:57637 -DEBUG: Subplan 46_2 will be sent to localhost:57638 -DEBUG: Subplan 46_3 will be sent to localhost:57637 -DEBUG: Subplan 46_3 will be sent to localhost:57638 - key | key | value ------+-----+------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_3 for CTE some_values_3: SELECT some_values_2.key FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT some_values_3.key, ref_table.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_3 JOIN intermediate_result_pruning.ref_table ON (true)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx + key | key | value +--------------------------------------------------------------------- (0 rows) -- join on intermediate results, so should only @@ -384,13 +384,13 @@ WITH some_values_1 AS some_values_2 AS (SELECT key, random() FROM table_2 WHERE value IN ('3', '4')) SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key); -DEBUG: generating subplan 50_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 50_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 50 query after replacing subqueries and 
CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) -DEBUG: Subplan 50_1 will be sent to localhost:57638 -DEBUG: Subplan 50_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -401,13 +401,13 @@ WITH some_values_1 AS some_values_2 AS (SELECT key, random() FROM table_2 WHERE value IN ('3', '4')) SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key) WHERE false; -DEBUG: generating subplan 53_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 53_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE false -DEBUG: Subplan 53_1 will be sent to localhost:57637 -DEBUG: Subplan 53_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 
'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE false +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -423,13 +423,13 @@ SELECT count(*) FROM some_values_3; -DEBUG: generating subplan 56_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) -DEBUG: generating subplan 56_2 for CTE some_values_3: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 -DEBUG: Plan 56 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_3 -DEBUG: Subplan 56_1 will be sent to localhost:57638 -DEBUG: Subplan 56_2 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) +DEBUG: generating subplan XXX_2 for CTE some_values_3: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_3 +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -472,24 +472,24 @@ SELECT count(*) FROM ) as level_6, table_1 WHERE table_1.key::int = level_6.min::int GROUP BY table_1.value ) as bar; -DEBUG: generating subplan 59_1 for subquery SELECT count(*) AS cnt, value FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value -DEBUG: generating subplan 59_2 for subquery SELECT avg((table_2.value)::integer) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value FROM read_intermediate_result('59_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value text)) level_1, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) level_1.cnt) AND (table_1.key OPERATOR(pg_catalog.=) 3))) level_2, intermediate_result_pruning.table_2 WHERE ((table_2.key OPERATOR(pg_catalog.=) level_2.cnt) AND (table_2.key OPERATOR(pg_catalog.=) 5)) GROUP BY level_2.cnt -DEBUG: generating subplan 59_3 for subquery SELECT max(table_1.value) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM 
read_intermediate_result('59_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, intermediate_result_pruning.table_1 WHERE (((table_1.value)::numeric OPERATOR(pg_catalog.=) level_3.avg) AND (table_1.key OPERATOR(pg_catalog.=) 6)) GROUP BY level_3.avg -DEBUG: generating subplan 59_4 for subquery SELECT avg((table_2.value)::integer) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('59_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 text)) level_4, intermediate_result_pruning.table_2 WHERE ((level_4.mx_val_1)::integer OPERATOR(pg_catalog.=) table_2.key) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 59_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('59_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.>) 111)) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 59_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('59_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) GROUP BY table_1.value -DEBUG: Plan 59 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('59_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar -DEBUG: Subplan 59_1 will be sent to localhost:57638 -DEBUG: Subplan 59_2 will be sent to localhost:57637 -DEBUG: Subplan 59_3 will be sent to localhost:57637 -DEBUG: Subplan 59_3 will be sent to localhost:57638 -DEBUG: Subplan 59_4 will be sent to localhost:57637 -DEBUG: Subplan 59_4 will be sent to localhost:57638 -DEBUG: Subplan 59_5 will be sent to localhost:57637 -DEBUG: Subplan 59_5 will be sent to localhost:57638 -DEBUG: Subplan 59_6 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value +DEBUG: generating subplan XXX_2 for subquery SELECT avg((table_2.value)::integer) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value text)) level_1, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) level_1.cnt) AND (table_1.key OPERATOR(pg_catalog.=) 3))) level_2, intermediate_result_pruning.table_2 WHERE ((table_2.key OPERATOR(pg_catalog.=) level_2.cnt) AND (table_2.key OPERATOR(pg_catalog.=) 5)) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(table_1.value) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, intermediate_result_pruning.table_1 WHERE (((table_1.value)::numeric OPERATOR(pg_catalog.=) level_3.avg) AND (table_1.key OPERATOR(pg_catalog.=) 6)) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg((table_2.value)::integer) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM 
read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 text)) level_4, intermediate_result_pruning.table_2 WHERE ((level_4.mx_val_1)::integer OPERATOR(pg_catalog.=) table_2.key) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.>) 111)) GROUP BY level_5.avg_ev_type +DEBUG: generating subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) GROUP BY table_1.value +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_6 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -531,21 +531,21 @@ SELECT count(*) FROM WHERE table_1.key::int = level_6.min::int AND table_1.key = 4 GROUP BY table_1.value ) as bar; -DEBUG: generating subplan 66_1 for subquery SELECT count(*) AS cnt, value FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value -DEBUG: generating subplan 66_2 for subquery SELECT avg((table_2.value)::integer) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value text)) level_1, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) level_1.cnt) AND (table_1.key OPERATOR(pg_catalog.=) 3))) level_2, intermediate_result_pruning.table_2 WHERE ((table_2.key OPERATOR(pg_catalog.=) level_2.cnt) AND (table_2.key OPERATOR(pg_catalog.=) 5)) GROUP BY level_2.cnt -DEBUG: generating subplan 66_3 for subquery SELECT max(table_1.value) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('66_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, intermediate_result_pruning.table_1 WHERE (((table_1.value)::numeric OPERATOR(pg_catalog.=) level_3.avg) AND (table_1.key OPERATOR(pg_catalog.=) 6)) GROUP BY level_3.avg -DEBUG: generating subplan 66_4 for subquery SELECT avg((table_2.value)::integer) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('66_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 text)) level_4, intermediate_result_pruning.table_2 WHERE (((level_4.mx_val_1)::integer OPERATOR(pg_catalog.=) table_2.key) AND 
(table_2.key OPERATOR(pg_catalog.=) 1)) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 66_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('66_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.=) 111)) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 66_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('66_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) AND (table_1.key OPERATOR(pg_catalog.=) 4)) GROUP BY table_1.value -DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('66_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar -DEBUG: Subplan 66_1 will be sent to localhost:57638 -DEBUG: Subplan 66_2 will be sent to localhost:57637 -DEBUG: Subplan 66_3 will be sent to localhost:57637 -DEBUG: Subplan 66_4 will be sent to localhost:57638 -DEBUG: Subplan 66_5 will be sent to localhost:57638 -DEBUG: Subplan 66_6 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value +DEBUG: generating subplan XXX_2 for subquery SELECT avg((table_2.value)::integer) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value text)) level_1, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) level_1.cnt) AND (table_1.key OPERATOR(pg_catalog.=) 3))) level_2, intermediate_result_pruning.table_2 WHERE ((table_2.key OPERATOR(pg_catalog.=) level_2.cnt) AND (table_2.key OPERATOR(pg_catalog.=) 5)) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(table_1.value) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, intermediate_result_pruning.table_1 WHERE (((table_1.value)::numeric OPERATOR(pg_catalog.=) level_3.avg) AND (table_1.key OPERATOR(pg_catalog.=) 6)) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg((table_2.value)::integer) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 text)) level_4, intermediate_result_pruning.table_2 WHERE (((level_4.mx_val_1)::integer OPERATOR(pg_catalog.=) table_2.key) AND (table_2.key OPERATOR(pg_catalog.=) 1)) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.=) 111)) GROUP BY level_5.avg_ev_type +DEBUG: generating 
subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) AND (table_1.key OPERATOR(pg_catalog.=) 4)) GROUP BY table_1.value +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_6 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -554,13 +554,13 @@ DEBUG: Subplan 66_6 will be sent to localhost:57637 (SELECT key FROM table_1 WHERE key = 1) INTERSECT (SELECT key FROM table_1 WHERE key = 2); -DEBUG: generating subplan 73_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 73_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('73_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('73_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) -DEBUG: Subplan 73_1 will be sent to localhost:57638 -DEBUG: Subplan 73_2 will be sent to localhost:57638 - key ------ +DEBUG: generating subplan XXX_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + key +--------------------------------------------------------------------- (0 rows) -- the intermediate results should just hit a single worker @@ -579,18 +579,18 @@ cte_2 AS SELECT * FROM cte_1 UNION SELECT * FROM cte_2; -DEBUG: generating subplan 76_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) -DEBUG: generating subplan 77_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 77_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('77_1'::text, 
'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) -DEBUG: generating subplan 76_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) -DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('76_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 -DEBUG: Subplan 76_1 will be sent to localhost:57638 -DEBUG: Subplan 77_1 will be sent to localhost:57637 -DEBUG: Subplan 77_2 will be sent to localhost:57637 -DEBUG: Subplan 76_2 will be sent to localhost:57638 - key ------ +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + key +--------------------------------------------------------------------- (0 rows) -- one final test with SET operations, where @@ -608,19 +608,19 @@ cte_2 AS SELECT count(*) FROM table_1 JOIN cte_1 USING (key) ) SELECT * FROM cte_2; -DEBUG: generating subplan 81_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) -DEBUG: generating subplan 82_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 82_2 for subquery 
SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 82 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('82_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('82_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) -DEBUG: generating subplan 81_2 for CTE cte_2: SELECT count(*) AS count FROM (intermediate_result_pruning.table_1 JOIN (SELECT intermediate_result.key FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 USING (key)) -DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('81_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte_2 -DEBUG: Subplan 81_1 will be sent to localhost:57637 -DEBUG: Subplan 81_1 will be sent to localhost:57638 -DEBUG: Subplan 82_1 will be sent to localhost:57637 -DEBUG: Subplan 82_2 will be sent to localhost:57637 -DEBUG: Subplan 81_2 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT count(*) AS count FROM (intermediate_result_pruning.table_1 JOIN (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 USING (key)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte_2 +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -634,12 +634,12 @@ FROM (SELECT key, random() FROM table_2) as bar WHERE foo.key != bar.key; -DEBUG: generating subplan 86_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2 -DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar 
WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) -DEBUG: Subplan 86_1 will be sent to localhost:57637 -DEBUG: Subplan 86_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 14 (1 row) @@ -652,11 +652,11 @@ FROM (SELECT key, random() FROM table_2) as bar WHERE foo.key != bar.key; -DEBUG: generating subplan 88_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2 -DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) -DEBUG: Subplan 88_1 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 4 (1 row) @@ -673,17 +673,17 @@ raw_data AS ( DELETE FROM table_2 WHERE key >= (SELECT min(key) FROM select_data WHERE key > 1) RETURNING * ) SELECT * FROM raw_data; -DEBUG: generating subplan 90_1 for CTE select_data: SELECT key, value FROM intermediate_result_pruning.table_1 -DEBUG: generating subplan 90_2 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE (key OPERATOR(pg_catalog.>=) (SELECT min(select_data.key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('90_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE (select_data.key OPERATOR(pg_catalog.>) 1))) RETURNING key, value -DEBUG: generating subplan 92_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('90_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE (key OPERATOR(pg_catalog.>) 1) -DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (key OPERATOR(pg_catalog.>=) (SELECT 
intermediate_result.min FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value -DEBUG: Plan 90 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('90_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data -DEBUG: Subplan 90_1 will be sent to localhost:57637 -DEBUG: Subplan 90_2 will be sent to localhost:57638 -DEBUG: Subplan 92_1 will be sent to localhost:57637 -DEBUG: Subplan 92_1 will be sent to localhost:57638 - key | value ------+------- +DEBUG: generating subplan XXX_1 for CTE select_data: SELECT key, value FROM intermediate_result_pruning.table_1 +DEBUG: generating subplan XXX_2 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE (key OPERATOR(pg_catalog.>=) (SELECT min(select_data.key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE (select_data.key OPERATOR(pg_catalog.>) 1))) RETURNING key, value +DEBUG: generating subplan XXX_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE (key OPERATOR(pg_catalog.>) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (key OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + key | value +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -703,17 +703,17 @@ raw_data AS ( DELETE FROM table_2 WHERE value::int >= (SELECT min(key) FROM select_data WHERE key > 1 + random()) RETURNING * ) SELECT * FROM raw_data; -DEBUG: generating subplan 94_1 for CTE select_data: SELECT key, value FROM intermediate_result_pruning.table_1 -DEBUG: generating subplan 94_2 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE ((value)::integer OPERATOR(pg_catalog.>=) (SELECT min(select_data.key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('94_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE ((select_data.key)::double precision OPERATOR(pg_catalog.>) ((1)::double precision OPERATOR(pg_catalog.+) random())))) RETURNING key, value -DEBUG: generating subplan 96_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('94_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE ((key)::double precision OPERATOR(pg_catalog.>) ((1)::double 
precision OPERATOR(pg_catalog.+) random())) -DEBUG: Plan 96 query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE ((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value -DEBUG: Plan 94 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('94_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data -DEBUG: Subplan 94_1 will be sent to localhost:57637 -DEBUG: Subplan 94_2 will be sent to localhost:57638 -DEBUG: Subplan 96_1 will be sent to localhost:57637 -DEBUG: Subplan 96_1 will be sent to localhost:57638 - key | value ------+------- +DEBUG: generating subplan XXX_1 for CTE select_data: SELECT key, value FROM intermediate_result_pruning.table_1 +DEBUG: generating subplan XXX_2 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE ((value)::integer OPERATOR(pg_catalog.>=) (SELECT min(select_data.key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE ((select_data.key)::double precision OPERATOR(pg_catalog.>) ((1)::double precision OPERATOR(pg_catalog.+) random())))) RETURNING key, value +DEBUG: generating subplan XXX_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE ((key)::double precision OPERATOR(pg_catalog.>) ((1)::double precision OPERATOR(pg_catalog.+) random())) +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE ((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + key | value +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -731,14 +731,14 @@ raw_data AS ( DELETE FROM table_2 WHERE value::int >= (SELECT min(key) FROM table_1 WHERE key > random()) AND key = 6 RETURNING * ) SELECT * FROM raw_data; -DEBUG: generating subplan 98_1 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT min(table_1.key) AS min FROM intermediate_result_pruning.table_1 WHERE ((table_1.key)::double precision OPERATOR(pg_catalog.>) random()))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value -DEBUG: generating subplan 99_1 for subquery SELECT min(key) AS min FROM intermediate_result_pruning.table_1 WHERE ((key)::double precision OPERATOR(pg_catalog.>) random()) -DEBUG: Plan 99 query after replacing subqueries and CTEs: 
DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('99_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value -DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data -DEBUG: Subplan 98_1 will be sent to localhost:57637 -DEBUG: Subplan 99_1 will be sent to localhost:57637 - key | value ------+------- +DEBUG: generating subplan XXX_1 for CTE raw_data: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT min(table_1.key) AS min FROM intermediate_result_pruning.table_1 WHERE ((table_1.key)::double precision OPERATOR(pg_catalog.>) random()))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value +DEBUG: generating subplan XXX_1 for subquery SELECT min(key) AS min FROM intermediate_result_pruning.table_1 WHERE ((key)::double precision OPERATOR(pg_catalog.>) random()) +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + key | value +--------------------------------------------------------------------- 6 | 6 (1 row) @@ -756,9 +756,9 @@ INSERT INTO table_1 SELECT * FROM table_2 where value IN (SELECT value FROM table_1 WHERE random() > 1) AND key = 1; DEBUG: volatile functions are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 104_1 for subquery SELECT value FROM intermediate_result_pruning.table_1 WHERE (random() OPERATOR(pg_catalog.>) (1)::double precision) -DEBUG: Plan 104 query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('104_1'::text, 'binary'::citus_copy_format) intermediate_result(value text))) AND (key OPERATOR(pg_catalog.=) 1)) -DEBUG: Subplan 104_1 will be sent to localhost:57637 +DEBUG: generating subplan XXX_1 for subquery SELECT value FROM intermediate_result_pruning.table_1 WHERE (random() OPERATOR(pg_catalog.>) (1)::double precision) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text))) AND (key OPERATOR(pg_catalog.=) 1)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -- a similar query, with more complex subquery INSERT INTO table_1 SELECT * FROM table_2 where key = 1 AND @@ -780,18 +780,18 @@ INSERT INTO table_1 SELECT * FROM cte_2); DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: generating subplan 107_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) -DEBUG: generating subplan 108_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 108_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 108 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('108_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('108_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) -DEBUG: generating subplan 107_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) -DEBUG: generating subplan 107_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('107_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 -DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((key OPERATOR(pg_catalog.=) 1) AND ((value)::integer OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('107_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)))) -DEBUG: Subplan 107_1 will be sent to localhost:57637 -DEBUG: Subplan 108_1 will be sent to localhost:57638 -DEBUG: Subplan 108_2 will be sent to localhost:57638 -DEBUG: Subplan 107_2 will be sent to localhost:57637 
-DEBUG: Subplan 107_3 will be sent to localhost:57637 +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((key OPERATOR(pg_catalog.=) 1) AND ((value)::integer OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)))) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -- same query, cte is on the FROM clause -- and this time the final query (and top-level intermediate result) -- hits all the shards because table_2.key != 1 @@ -817,19 +817,19 @@ INSERT INTO table_1 foo.key = table_2.value::int; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 114_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) -DEBUG: generating subplan 115_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 115_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 115 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('115_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('115_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) -DEBUG: generating subplan 114_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) -DEBUG: generating subplan 114_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('114_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('114_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 -DEBUG: Plan 114 query after replacing subqueries and CTEs: SELECT table_2.key, table_2.value FROM intermediate_result_pruning.table_2, (SELECT intermediate_result.key FROM read_intermediate_result('114_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) foo WHERE ((table_2.key OPERATOR(pg_catalog.<>) 1) AND (foo.key OPERATOR(pg_catalog.=) (table_2.value)::integer)) -DEBUG: Subplan 114_1 will be sent to localhost:57637 -DEBUG: Subplan 115_1 will be sent to localhost:57638 -DEBUG: Subplan 115_2 will be sent to localhost:57638 -DEBUG: Subplan 114_2 will be sent to localhost:57637 -DEBUG: Subplan 114_3 will be sent to localhost:57637 -DEBUG: Subplan 114_3 will be sent to localhost:57638 +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4) +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table_2.key, table_2.value FROM intermediate_result_pruning.table_2, (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) foo WHERE ((table_2.key OPERATOR(pg_catalog.<>) 1) AND (foo.key OPERATOR(pg_catalog.=) (table_2.value)::integer)) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -- append partitioned/heap-type SET citus.replication_model TO statement; -- do not print out 'building index pg_toast_xxxxx_index' messages @@ -837,38 +837,38 @@ SET client_min_messages TO DEFAULT; CREATE TABLE range_partitioned(range_column text, data int); SET client_min_messages TO DEBUG1; SELECT create_distributed_table('range_partitioned', 'range_column', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1480013 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1480014 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1480015 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1480016 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1480017 (1 row) @@ -885,11 +885,11 @@ FROM WHERE range_column = 'A' AND data IN (SELECT data FROM range_partitioned); -DEBUG: generating subplan 120_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned -DEBUG: Plan 120 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('120_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) -DEBUG: Subplan 120_1 will be sent to localhost:57637 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM 
intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -901,12 +901,12 @@ FROM WHERE range_column >= 'A' AND range_column <= 'K' AND data IN (SELECT data FROM range_partitioned); -DEBUG: generating subplan 122_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned -DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) -DEBUG: Subplan 122_1 will be sent to localhost:57637 -DEBUG: Subplan 122_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -921,12 +921,12 @@ FROM WHERE range_column IN ('A', 'E') AND range_partitioned.data IN (SELECT data FROM some_data); -DEBUG: generating subplan 124_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned -DEBUG: Plan 124 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('124_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data))) -DEBUG: Subplan 124_1 will be sent to localhost:57637 -DEBUG: Subplan 124_1 will be sent to localhost:57638 - count -------- +DEBUG: generating subplan XXX_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data))) +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/intermediate_results.out 
b/src/test/regress/expected/intermediate_results.out index b97cac425..cfdf3382a 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -9,14 +9,14 @@ CREATE OR REPLACE FUNCTION pg_catalog.store_intermediate_result_on_node(nodename -- in the same transaction we can read a result BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 5 (1 row) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -27,8 +27,8 @@ SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 in COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 5 (1 row) @@ -37,16 +37,16 @@ ERROR: result "squares" does not exist BEGIN; CREATE TABLE interesting_squares (user_id text, interested_in text); SELECT create_distributed_table('interesting_squares', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO interesting_squares VALUES ('jon', '2'), ('jon', '5'), ('jack', '3'); -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - broadcast_intermediate_result -------------------------------- + broadcast_intermediate_result +--------------------------------------------------------------------- 5 (1 row) @@ -55,8 +55,8 @@ SELECT x, x2 FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in) WHERE user_id = 'jon' ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 2 | 4 5 | 25 (2 rows) @@ -65,8 +65,8 @@ END; BEGIN; -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - broadcast_intermediate_result -------------------------------- + broadcast_intermediate_result +--------------------------------------------------------------------- 5 (1 row) @@ -75,8 +75,8 @@ SELECT x, x2 FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in) ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 2 | 4 3 | 9 5 | 25 @@ -110,8 +110,8 @@ SET client_min_messages TO DEFAULT; -- try to read the file as text, will fail because of binary encoding BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 5 (1 row) @@ -121,13 +121,13 @@ END; -- try to read the file with wrong encoding BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM 
generate_series(1,5) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 5 (1 row) SELECT * FROM read_intermediate_result('squares', 'csv') AS res (x int, x2 int); -ERROR: invalid input syntax for type integer: "PGCOPY" +ERROR: invalid input syntax for integer: "PGCOPY" END; -- try a composite type CREATE TYPE intermediate_results.square_type AS (x text, x2 int); @@ -139,8 +139,8 @@ INSERT INTO stored_squares VALUES ('jon', '(5,25)'::intermediate_results.square_ -- composite types change the format to text BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 4 (1 row) @@ -149,14 +149,14 @@ ERROR: COPY file signature not recognized COMMIT; BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 4 (1 row) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type); - s --------- + s +--------------------------------------------------------------------- (2,4) (3,9) (4,16) @@ -167,8 +167,8 @@ COMMIT; BEGIN; -- put an intermediate result in text format on all workers SELECT broadcast_intermediate_result('stored_squares', 'SELECT square, metadata FROM stored_squares'); - broadcast_intermediate_result -------------------------------- + broadcast_intermediate_result +--------------------------------------------------------------------- 4 (1 row) @@ -178,8 +178,8 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb) ) squares ON ((s).x = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; - user_id | interested_in | s | m ----------+---------------+--------+-------------- + user_id | interested_in | s | m +--------------------------------------------------------------------- jon | 2 | (2,4) | {"value": 2} jon | 5 | (5,25) | {"value": 5} (2 rows) @@ -190,8 +190,8 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb) ) squares ON ((s).x = interested_in) ORDER BY 1,2; - user_id | interested_in | s | m ----------+---------------+--------+-------------- + user_id | interested_in | s | m +--------------------------------------------------------------------- jack | 3 | (3,9) | {"value": 3} jon | 2 | (2,4) | {"value": 2} jon | 5 | (5,25) | {"value": 5} @@ -201,40 +201,40 @@ END; BEGIN; -- accurate row count estimates for primitive types SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,632) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 632 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..4.55 rows=632 width=8) (1 row) -- less 
accurate results for variable types SELECT create_intermediate_result('hellos', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 63 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('hellos', 'binary') AS res (x int, y text); - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.32 rows=30 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type); - QUERY PLAN ----------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.01 rows=1 width=32) (1 row) @@ -245,8 +245,8 @@ TO PROGRAM $$psql -h localhost -p 57636 -U postgres -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$ WITH (FORMAT text); SELECT * FROM squares ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -271,28 +271,28 @@ BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'), create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(4,6) s'), create_intermediate_result('squares_3', 'SELECT s, s*s FROM generate_series(7,10) s'); - create_intermediate_result | create_intermediate_result | create_intermediate_result -----------------------------+----------------------------+---------------------------- + create_intermediate_result | create_intermediate_result | create_intermediate_result +--------------------------------------------------------------------- 3 | 3 | 4 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 (3 rows) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares_3']::text[], 'binary') AS res (x int, x2 int); - x | x2 -----+----- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -308,8 +308,8 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result ----------------------------- + create_intermediate_result 
+--------------------------------------------------------------------- 5 (1 row) @@ -318,8 +318,8 @@ ERROR: result "squares_1" does not exist -- error behaviour, and also check that results are deleted on rollback BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 3 (1 row) @@ -335,14 +335,14 @@ ERROR: null array element not allowed in this context ROLLBACK TO SAVEPOINT s1; -- after rollbacks we should be able to run vail read_intermediate_results still. SELECT count(*) FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); - count -------- + count +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -356,8 +356,8 @@ SELECT broadcast_intermediate_result('stored_squares_1', 'SELECT s, s*s, ROW(1::text, 2) FROM generate_series(1,3) s'), broadcast_intermediate_result('stored_squares_2', 'SELECT s, s*s, ROW(2::text, 3) FROM generate_series(4,6) s'); - broadcast_intermediate_result | broadcast_intermediate_result --------------------------------+------------------------------- + broadcast_intermediate_result | broadcast_intermediate_result +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -367,8 +367,8 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_results(ARRAY['stored_squares_1', 'stored_squares_2'], 'binary') AS res (x int, x2 int, z intermediate_results.square_type) ) squares ON (squares.x::text = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; - user_id | interested_in | x | x2 | z ----------+---------------+---+----+------- + user_id | interested_in | x | x2 | z +--------------------------------------------------------------------- jon | 2 | 2 | 4 | (1,2) jon | 5 | 5 | 25 | (2,3) (2 rows) @@ -379,41 +379,41 @@ BEGIN; -- almost accurate row count estimates for primitive types SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,632) s'), create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(633,1024) s'); - create_intermediate_result | create_intermediate_result -----------------------------+---------------------------- + create_intermediate_result | create_intermediate_result +--------------------------------------------------------------------- 632 | 392 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2'], 'binary') AS res (x int, x2 int); - QUERY PLAN -------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..7.37 rows=1024 width=8) (1 row) -- less accurate results for variable types SELECT create_intermediate_result('hellos_1', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$), create_intermediate_result('hellos_2', $$SELECT s, 'hello-'||s FROM generate_series(64,129) s$$); - create_intermediate_result | create_intermediate_result -----------------------------+---------------------------- + create_intermediate_result | create_intermediate_result 
+--------------------------------------------------------------------- 63 | 66 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['hellos_1', 'hellos_2'], 'binary') AS res (x int, y text); - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..0.66 rows=62 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result ----------------------------- + create_intermediate_result +--------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['stored_squares'], 'text') AS res (s intermediate_results.square_type); - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..0.01 rows=1 width=32) (1 row) @@ -424,20 +424,20 @@ END; -- straightforward, single-result case BEGIN; SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1, 5) s'); - broadcast_intermediate_result -------------------------------- + broadcast_intermediate_result +--------------------------------------------------------------------- 5 (1 row) SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port); - fetch_intermediate_results ----------------------------- + fetch_intermediate_results +--------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -446,14 +446,14 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 (5 rows) SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results ----------------------------- + fetch_intermediate_results +--------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -466,16 +466,16 @@ END; BEGIN; SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s'); - store_intermediate_result_on_node ------------------------------------ - + store_intermediate_result_on_node +--------------------------------------------------------------------- + (1 row) SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s'); - store_intermediate_result_on_node ------------------------------------ - + store_intermediate_result_on_node +--------------------------------------------------------------------- + (1 row) SAVEPOINT s1; @@ -485,8 +485,8 @@ ERROR: result "squares_1" does not exist ROLLBACK TO SAVEPOINT s1; -- fetch from worker 2 should fail SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_2_port); -ERROR: could not open 
file "base/pgsql_job_cache/10_0_200/squares_1.data": No such file or directory -CONTEXT: while executing command on localhost:57638 +ERROR: could not open file "base/pgsql_job_cache/xx_x_xxx/squares_1.data": No such file or directory +CONTEXT: while executing command on localhost:xxxxx ROLLBACK TO SAVEPOINT s1; -- still, results aren't available on coordinator yet SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); @@ -494,14 +494,14 @@ ERROR: result "squares_1" does not exist ROLLBACK TO SAVEPOINT s1; -- fetch from worker 1 should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results ----------------------------- + fetch_intermediate_results +--------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -510,14 +510,14 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], -- fetching again should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results ----------------------------- + fetch_intermediate_results +--------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -527,8 +527,8 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], ROLLBACK TO SAVEPOINT s1; -- empty result id list should succeed SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port); - fetch_intermediate_results ----------------------------- + fetch_intermediate_results +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out index 56d464af6..4fa0f754a 100644 --- a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out +++ b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out @@ -3,27 +3,27 @@ Parsed test spec with 2 sessions starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-copy-to-reference-table: +1 +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-commit: +step s1-commit: COMMIT; step s2-copy-to-reference-table: <... 
completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -31,39 +31,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-copy-to-reference-table: +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -71,39 +71,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-insert-to-reference-table: +1 +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -111,39 +111,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 6 -57638 t 6 +57637 t 6 +57638 t 6 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-to-reference-table: +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? 
-1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -151,39 +151,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 6 -57638 t 6 +57637 t 6 +57638 t 6 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-ddl-on-reference-table: +1 +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-commit: +step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> -step s2-print-index-count: +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -191,39 +191,39 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-ddl-on-reference-table: +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-index-count: +1 +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -231,42 +231,42 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2 create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-reference-table-2: +1 +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-reference-table-2: <... 
completed> create_reference_table - -step s2-print-content-2: + +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -274,42 +274,42 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2 create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-reference-table-2: +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); create_reference_table - -step s1-add-second-worker: + +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content-2: +1 +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -317,36 +317,36 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-copy-to-reference-table: +1 +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-commit: +step s1-commit: COMMIT; step s2-copy-to-reference-table: <... completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -354,36 +354,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 master_remove_node - - + + starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-copy-to-reference-table: +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -391,36 +391,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-insert-to-reference-table: +1 +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-to-reference-table: <... 
completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -428,36 +428,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-insert-to-reference-table: +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -465,36 +465,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-ddl-on-reference-table: +1 +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-commit: +step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> -step s2-print-index-count: +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -502,36 +502,36 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-ddl-on-reference-table: +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-index-count: +1 +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -539,39 +539,39 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2 create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-reference-table-2: +1 +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-reference-table-2: <... 
completed> create_reference_table - -step s2-print-content-2: + +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -579,39 +579,39 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2 create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-reference-table-2: +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); create_reference_table - -step s1-add-second-worker: + +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content-2: +1 +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -619,11 +619,11 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_add_remove_node.out b/src/test/regress/expected/isolation_add_remove_node.out index e6d03b1a4..5203acbd8 100644 --- a/src/test/regress/expected/isolation_add_remove_node.out +++ b/src/test/regress/expected/isolation_add_remove_node.out @@ -1,634 +1,634 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-remove-node-1: +1 +step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-1: <... completed> master_remove_node - -step s1-show-nodes: + +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-2: +1 +step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t -localhost 57638 t +localhost 57637 t +localhost 57638 t master_remove_node - - + + starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-1: +1 +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node-1: <... completed> -?column? +?column? 
-1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-2: +1 +step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -step s1-abort: +step s1-abort: ABORT; step s2-add-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57638 t +localhost 57638 t master_remove_node - + starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-1: +1 +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-add-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-add-node-2: +1 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node - -step s2-remove-node-2: + +step s2-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-2: <... completed> master_remove_node - -step s1-show-nodes: + +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node - -step s2-remove-node-1: + +step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-1: <... 
completed> -error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:57637" does not exist -step s1-show-nodes: +error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? 
-1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? 
+?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + diff --git a/src/test/regress/expected/isolation_alter_role_propagation.out b/src/test/regress/expected/isolation_alter_role_propagation.out index 777a95273..4e30be626 100644 --- a/src/test/regress/expected/isolation_alter_role_propagation.out +++ b/src/test/regress/expected/isolation_alter_role_propagation.out @@ -5,28 +5,28 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-add-node: +step s2-add-node: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node: <... completed> -?column? +?column? 
-1 +1 run_command_on_workers (localhost,57637,t,"DROP ROLE") @@ -37,25 +37,25 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node: +step s1-add-node: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-alter-role-1: +1 +step s2-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-role-1: <... completed> @@ -69,22 +69,22 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-alter-role-1: +step s2-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-role-1: <... completed> @@ -99,22 +99,22 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-alter-role-2: +step s2-alter-role-2: ALTER ROLE alter_role_2 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; run_command_on_workers diff --git a/src/test/regress/expected/isolation_append_copy_vs_all.out b/src/test/regress/expected/isolation_append_copy_vs_all.out index 1348e991d..65f8de20f 100644 --- a/src/test/regress/expected/isolation_append_copy_vs_all.out +++ b/src/test/regress/expected/isolation_append_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM append_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: 
s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 
&& echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM append_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE append_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX append_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -224,7 +224,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -244,7 +244,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -265,7 +265,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -285,24 +285,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -311,16 +311,16 @@ step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command -1 +1 step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -329,16 +329,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -2 +2 step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE append_copy; step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); step s1-begin: BEGIN; @@ -348,134 +348,134 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count -0 +step s1-select-count: SELECT COUNT(*) FROM append_copy; +count + +0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM append_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO append_copy VALUES(0, 'k', 0); step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: 
SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM append_copy WHERE id = 1; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_copy; @@ -483,14 +483,14 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE append_copy; @@ -504,7 +504,7 @@ ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); @@ -512,9 +512,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -524,7 +524,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; @@ -533,9 +533,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -545,7 +545,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; @@ -554,9 +554,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -566,7 +566,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -575,9 +575,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -587,7 +587,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; @@ -595,9 +595,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -607,71 +607,71 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size -32768 +32768 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); master_apply_delete_command -1 +1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); master_drop_all_shards -1 +1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE append_copy; step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); create_distributed_table - + step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 diff --git a/src/test/regress/expected/isolation_cancellation.out b/src/test/regress/expected/isolation_cancellation.out index fc929c568..1947bd394 100644 --- a/src/test/regress/expected/isolation_cancellation.out +++ b/src/test/regress/expected/isolation_cancellation.out @@ -1,127 +1,127 @@ Parsed test spec with 2 sessions starting permutation: s1-timeout s1-sleep10000 s1-reset s1-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-reset: +step s1-reset: RESET ALL; -step s1-drop: +step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-sleep10000 s1-reset s2-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-reset: +step s1-reset: RESET ALL; -step s2-drop: +step s2-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s1-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-begin: +step s1-begin: BEGIN; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-rollback: +step s1-rollback: ROLLBACK; -step s1-reset: +step s1-reset: RESET ALL; -step s1-drop: +step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s2-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-begin: +step s1-begin: BEGIN; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-rollback: +step s1-rollback: ROLLBACK; -step s1-reset: +step s1-reset: RESET ALL; -step s2-drop: +step s2-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s1-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-begin: +step s1-begin: BEGIN; -step s1-update1: +step s1-update1: UPDATE cancel_table SET data = '' WHERE test_id = 1; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-rollback: +step s1-rollback: ROLLBACK; -step s1-reset: +step s1-reset: RESET ALL; -step s1-drop: +step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s2-drop -step s1-timeout: +step s1-timeout: SET statement_timeout = '100ms'; -step s1-begin: +step s1-begin: BEGIN; -step s1-update1: +step s1-update1: UPDATE cancel_table SET data = '' WHERE test_id = 1; -step s1-sleep10000: +step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout -step s1-rollback: +step s1-rollback: ROLLBACK; -step s1-reset: +step s1-reset: RESET ALL; -step s2-drop: +step s2-drop: DROP TABLE cancel_table; diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out index 9d0b9e331..355c59e89 100644 --- 
a/src/test/regress/expected/isolation_citus_dist_activity.out +++ b/src/test/regress/expected/isolation_citus_dist_activity.out @@ -3,223 +3,223 @@ Parsed test spec with 3 sessions starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-alter-table s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback create_distributed_table - -step s1-cache-connections: + +step s1-cache-connections: SET citus.max_cached_conns_per_worker TO 4; SET citus.force_max_query_parallelization TO on; UPDATE test_table SET column2 = 0; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE test_table ADD COLUMN x INT; -step s2-sleep: +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname ALTER TABLE test_table ADD COLUMN x INT; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s3-view-worker: +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname SELECT worker_apply_shard_ddl_command (1300004, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT worker_apply_shard_ddl_command (1300003, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT worker_apply_shard_ddl_command (1300002, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT worker_apply_shard_ddl_command (1300001, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-rollback: +')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-rollback: ROLLBACK; -step s1-commit: +step s1-commit: COMMIT; -step s3-rollback: +step s3-rollback: ROLLBACK; 
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback create_distributed_table - -step s1-cache-connections: + +step s1-cache-connections: SET citus.max_cached_conns_per_worker TO 4; SET citus.force_max_query_parallelization TO on; UPDATE test_table SET column2 = 0; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_table VALUES (100, 100); -step s2-sleep: +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname INSERT INTO test_table VALUES (100, 100); -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s3-view-worker: +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-rollback: +INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-rollback: ROLLBACK; -step s1-commit: +step s1-commit: COMMIT; -step s3-rollback: +step s3-rollback: ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback create_distributed_table - -step s1-cache-connections: + +step s1-cache-connections: SET citus.max_cached_conns_per_worker TO 4; SET citus.force_max_query_parallelization TO on; UPDATE test_table SET column2 = 0; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-select: +step s1-select: SELECT count(*) FROM test_table; -count +count -0 -step s2-sleep: +0 +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate 
wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname SELECT count(*) FROM test_table; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s3-view-worker: +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-rollback: +SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-rollback: ROLLBACK; -step s1-commit: +step s1-commit: COMMIT; -step s3-rollback: +step s3-rollback: ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback create_distributed_table - -step s1-cache-connections: + +step s1-cache-connections: SET citus.max_cached_conns_per_worker TO 4; SET citus.force_max_query_parallelization TO on; UPDATE test_table SET column2 = 0; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-select-router: +step s1-select-router: SELECT count(*) FROM test_table WHERE column1 = 55; -count +count -0 -step s2-sleep: +0 +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate 
wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname SELECT count(*) FROM test_table WHERE column1 = 55; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s3-view-worker: +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-rollback: +SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-rollback: ROLLBACK; -step s1-commit: +step s1-commit: COMMIT; -step s3-rollback: +step s3-rollback: ROLLBACK; diff --git a/src/test/regress/expected/isolation_cluster_management.out b/src/test/regress/expected/isolation_cluster_management.out index 336550e78..505157100 100644 --- a/src/test/regress/expected/isolation_cluster_management.out +++ b/src/test/regress/expected/isolation_cluster_management.out @@ -1,13 +1,13 @@ Parsed test spec with 1 sessions starting permutation: s1a -step s1a: +step s1a: SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -?column? +1 +?column? -1 +1 diff --git a/src/test/regress/expected/isolation_concurrent_dml.out b/src/test/regress/expected/isolation_concurrent_dml.out index 69c06723c..02cc2403d 100644 --- a/src/test/regress/expected/isolation_concurrent_dml.out +++ b/src/test/regress/expected/isolation_concurrent_dml.out @@ -3,17 +3,17 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-update s1-commit master_create_worker_shards - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_concurrent_dml VALUES(1); -step s2-update: +step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update: <... completed> @@ -21,28 +21,28 @@ step s2-update: <... completed> starting permutation: s1-insert s2-update master_create_worker_shards - -step s1-insert: + +step s1-insert: INSERT INTO test_concurrent_dml VALUES(1); -step s2-update: +step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; starting permutation: s1-begin s1-multi-insert s2-update s1-commit master_create_worker_shards - -step s1-begin: + +step s1-begin: BEGIN; -step s1-multi-insert: +step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); -step s2-update: +step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update: <... 
completed> @@ -50,39 +50,39 @@ step s2-update: <... completed> starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit master_create_worker_shards - -step s1-begin: + +step s1-begin: BEGIN; -step s1-multi-insert: +step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); -step s2-multi-insert-overlap: +step s2-multi-insert-overlap: INSERT INTO test_concurrent_dml VALUES (1), (4); -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit master_create_worker_shards - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-multi-insert: +step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); -step s2-multi-insert: +step s2-multi-insert: INSERT INTO test_concurrent_dml VALUES (3), (4); -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out index ad6366975..95a9a59cf 100644 --- a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out +++ b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out @@ -1,50 +1,50 @@ Parsed test spec with 2 sessions starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit -step s1-load-cache: +step s1-load-cache: COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; -step s2-load-cache: +step s2-load-cache: COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; -step s2-set-placement-inactive: +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-repair-placement: + +step s1-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-repair-placement: <... completed> error in steps s2-commit s1-repair-placement: ERROR: target placement must be in inactive state starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit -step s2-set-placement-inactive: +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-repair-placement: + +step s1-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-repair-placement: <... 
completed> diff --git a/src/test/regress/expected/isolation_copy_placement_vs_modification.out b/src/test/regress/expected/isolation_copy_placement_vs_modification.out index 4574cb055..24abf82b1 100644 --- a/src/test/regress/expected/isolation_copy_placement_vs_modification.out +++ b/src/test/regress/expected/isolation_copy_placement_vs_modification.out @@ -1,508 +1,508 @@ Parsed test spec with 2 sessions starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content -step s1-load-cache: +step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; -step s1-insert: +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -1 -step s2-set-placement-inactive: +1 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-update: + +step s1-update: UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5; -step s2-commit: +step s2-commit: COMMIT; step s1-update: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content -step s1-load-cache: +step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; -step s1-insert: +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -1 -step s2-set-placement-inactive: +1 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-delete: + +step s1-delete: DELETE FROM test_copy_placement_vs_modification WHERE x = 5; -step s2-commit: +step s2-commit: COMMIT; step s1-delete: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t -57638 t +57637 t +57638 t starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content -step s1-load-cache: +step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-insert: + +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s2-commit: +step s2-commit: COMMIT; step s1-insert: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content -step s1-load-cache: +step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-copy: + +step s1-copy: COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; -step s2-commit: +step s2-commit: COMMIT; step s1-copy: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count -step s1-load-cache: +step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-ddl: + +step s1-ddl: CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x); -step s2-commit: +step s2-commit: COMMIT; step s1-ddl: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-index-count: - SELECT - nodeport, success, result - FROM +step s2-print-index-count: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE tablename = ''%s''') ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57637 t 1 -57638 t 1 -57638 t 1 +57637 t 1 +57637 t 1 +57638 t 1 +57638 t 1 starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content -step s1-insert: +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -1 -step s2-set-placement-inactive: +1 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-update: + +step s1-update: UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5; -step s2-commit: +step s2-commit: COMMIT; step s1-update: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content -step s1-insert: +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -1 -step s2-set-placement-inactive: +1 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-delete: + +step s1-delete: DELETE FROM test_copy_placement_vs_modification WHERE x = 5; -step s2-commit: +step s2-commit: COMMIT; step s1-delete: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t -57638 t +57637 t +57638 t starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-insert: + +step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); -step s2-commit: +step s2-commit: COMMIT; step s1-insert: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-copy: + +step s1-copy: COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; -step s2-commit: +step s2-commit: COMMIT; step s1-copy: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-content: - SELECT - nodeport, success, result - FROM +step s2-print-content: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count -step s1-begin: +step s1-begin: BEGIN; SET LOCAL citus.select_opens_transaction_block TO off; -step s1-select: +step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; -count +count -0 -step s2-set-placement-inactive: +0 +step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair-placement: +step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement - -step s1-ddl: + +step s1-ddl: CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x); -step s2-commit: +step s2-commit: COMMIT; step s1-ddl: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-print-index-count: - SELECT - nodeport, success, result - FROM +step s2-print-index-count: + SELECT + nodeport, success, result + FROM run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE tablename = ''%s''') ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57637 t 1 -57638 t 1 -57638 t 1 +57637 t 1 +57637 t 1 +57638 t 1 +57638 t 1 diff --git a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out index 3a59ce155..52f882f2d 100644 --- a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out @@ -1,192 +1,192 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy: + +step s1-copy: SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 8, 80 && echo 9, 90''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM copy_table; -count +count -11 +11 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy: + +step s1-copy: 
SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-begin: + +step s2-begin: BEGIN; -step s2-coordinator-drop: +step s2-coordinator-drop: DROP TABLE copy_table; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-coordinator-drop: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-stop-connection: +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM copy_table; ERROR: relation "copy_table" does not exist restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy: + +step s1-copy: SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM copy_table WHERE id=5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM copy_table; -count +count -8 +8 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_create_distributed_table.out b/src/test/regress/expected/isolation_create_distributed_table.out index 51c144342..418485f80 100644 --- a/src/test/regress/expected/isolation_create_distributed_table.out +++ b/src/test/regress/expected/isolation_create_distributed_table.out @@ -1,102 +1,102 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; 
-step s2-begin: +step s2-begin: BEGIN; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); create_distributed_table - -step s2-create_distributed_table: + +step s2-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); -step s1-commit: +step s1-commit: COMMIT; step s2-create_distributed_table: <... completed> error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-create_distributed_table s2-copy_to_local_table s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); create_distributed_table - -step s2-copy_to_local_table: + +step s2-copy_to_local_table: COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8'; -step s1-commit: +step s1-commit: COMMIT; step s2-copy_to_local_table: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-copy_to_local_table s1-create_distributed_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-copy_to_local_table: +step s2-copy_to_local_table: COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8'; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); -step s2-commit: +step s2-commit: COMMIT; step s1-create_distributed_table: <... completed> create_distributed_table - -step s1-commit: + +step s1-commit: COMMIT; starting permutation: s1-copy_to_local_table s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit -step s1-copy_to_local_table: +step s1-copy_to_local_table: COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8'; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); create_distributed_table - -step s2-create_distributed_table: + +step s2-create_distributed_table: SELECT create_distributed_table('table_to_distribute', 'id'); -step s1-commit: +step s1-commit: COMMIT; step s2-create_distributed_table: <... 
completed> error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_create_restore_point.out b/src/test/regress/expected/isolation_create_restore_point.out index cf90b199a..c0bb77d6b 100644 --- a/src/test/regress/expected/isolation_create_restore_point.out +++ b/src/test/regress/expected/isolation_create_restore_point.out @@ -3,252 +3,252 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-create-distributed s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-create-distributed: +step s1-create-distributed: CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('test_create_distributed_table', 'test_id'); create_distributed_table - -step s2-create-restore: + +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-insert s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-insert: +step s1-insert: INSERT INTO restore_table VALUES (1,'hello'); -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-modify-multiple: +step s1-modify-multiple: UPDATE restore_table SET data = 'world'; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-ddl s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-ddl: +step s1-ddl: ALTER TABLE restore_table ADD COLUMN x int; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-copy s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-copy: +step s1-copy: COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-recover s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-recover: +step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions -0 -step s2-create-restore: +0 +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? 
-1 +1 starting permutation: s1-begin s1-drop s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-drop: +step s1-drop: DROP TABLE restore_table; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-add-node s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-add-node: +step s1-add-node: SELECT 1 FROM master_add_inactive_node('localhost', 9999); -?column? +?column? -1 -step s2-create-restore: +1 +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-remove-node s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-remove-node: +step s1-remove-node: SELECT master_remove_node('localhost', 9999); master_remove_node - -step s2-create-restore: + +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-create-restore s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-create-restore: +step s1-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test-2'); -?column? +?column? -1 -step s2-create-restore: +1 +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s2-begin s2-create-restore s1-modify-multiple s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-modify-multiple: +1 +step s1-modify-multiple: UPDATE restore_table SET data = 'world'; -step s2-commit: +step s2-commit: COMMIT; step s1-modify-multiple: <... completed> @@ -256,20 +256,20 @@ step s1-modify-multiple: <... completed> starting permutation: s2-begin s2-create-restore s1-ddl s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-ddl: +1 +step s1-ddl: ALTER TABLE restore_table ADD COLUMN x int; -step s2-commit: +step s2-commit: COMMIT; step s1-ddl: <... completed> @@ -277,24 +277,24 @@ step s1-ddl: <... completed> starting permutation: s2-begin s2-create-restore s1-multi-statement s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? 
-1 -step s1-multi-statement: +1 +step s1-multi-statement: SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_table VALUES (1,'hello'); INSERT INTO restore_table VALUES (2,'hello'); COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-multi-statement: <... completed> @@ -302,153 +302,153 @@ step s1-multi-statement: <... completed> starting permutation: s1-begin s1-create-reference s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-create-reference: +step s1-create-reference: CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text); SELECT create_reference_table('test_create_reference_table'); create_reference_table - -step s2-create-restore: + +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-insert-ref s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-insert-ref: +step s1-insert-ref: INSERT INTO restore_ref_table VALUES (1,'hello'); -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-modify-multiple-ref s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-modify-multiple-ref: +step s1-modify-multiple-ref: UPDATE restore_ref_table SET data = 'world'; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-ddl-ref s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-ddl-ref: +step s1-ddl-ref: ALTER TABLE restore_ref_table ADD COLUMN x int; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? -1 +1 starting permutation: s1-begin s1-copy-ref s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-copy-ref: +step s1-copy-ref: COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; starting permutation: s1-begin s1-drop-ref s2-create-restore s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; SET citus.multi_shard_commit_protocol TO '2pc'; -step s1-drop-ref: +step s1-drop-ref: DROP TABLE restore_ref_table; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-restore: <... completed> -?column? +?column? 
-1 +1 starting permutation: s2-begin s2-create-restore s1-modify-multiple-ref s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-modify-multiple-ref: +1 +step s1-modify-multiple-ref: UPDATE restore_ref_table SET data = 'world'; -step s2-commit: +step s2-commit: COMMIT; step s1-modify-multiple-ref: <... completed> @@ -456,20 +456,20 @@ step s1-modify-multiple-ref: <... completed> starting permutation: s2-begin s2-create-restore s1-ddl-ref s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-ddl-ref: +1 +step s1-ddl-ref: ALTER TABLE restore_ref_table ADD COLUMN x int; -step s2-commit: +step s2-commit: COMMIT; step s1-ddl-ref: <... completed> @@ -477,24 +477,24 @@ step s1-ddl-ref: <... completed> starting permutation: s2-begin s2-create-restore s1-multi-statement-ref s2-commit create_reference_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-restore: +step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); -?column? +?column? -1 -step s1-multi-statement-ref: +1 +step s1-multi-statement-ref: SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_ref_table VALUES (1,'hello'); INSERT INTO restore_ref_table VALUES (2,'hello'); COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-multi-statement-ref: <... completed> diff --git a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out index 023e320c6..ed61cf589 100644 --- a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out +++ b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out @@ -1,32 +1,32 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-begin: +localhost 57637 +step s1-begin: BEGIN; -step s1-add-node-2: +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-table-1: +1 +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table-1: <... completed> create_distributed_table - -step s1-show-placements: + +step s1-show-placements: SELECT nodename, nodeport FROM @@ -36,49 +36,49 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57638 +localhost 57638 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - - + + starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-begin: +localhost 57637 +step s1-begin: BEGIN; -step s1-add-node-2: +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s2-create-table-1: +1 +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); -step s1-abort: +step s1-abort: ABORT; step s2-create-table-1: <... completed> create_distributed_table - -step s1-show-placements: + +step s1-show-placements: SELECT nodename, nodeport FROM @@ -88,29 +88,29 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57637 +localhost 57637 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - + starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s2-begin: +localhost 57637 +step s2-begin: BEGIN; -step s2-create-table-1: +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); @@ -118,18 +118,18 @@ step s2-create-table-1: create_distributed_table - -step s1-add-node-2: + +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-node-2: <... completed> -?column? +?column? -1 -step s1-show-placements: +1 +step s1-show-placements: SELECT nodename, nodeport FROM @@ -139,55 +139,55 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57637 +localhost 57637 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - - + + starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-2: +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node - -step s2-create-table-1: + +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table-1: <... completed> create_distributed_table - -step s1-show-placements: + +step s1-show-placements: SELECT nodename, nodeport FROM @@ -197,54 +197,54 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57637 +localhost 57637 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - + starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-2: +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node - -step s2-create-table-1: + +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); -step s1-abort: +step s1-abort: ABORT; step s2-create-table-1: <... completed> create_distributed_table - -step s1-show-placements: + +step s1-show-placements: SELECT nodename, nodeport FROM @@ -254,36 +254,36 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57638 +localhost 57638 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - - + + starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-table-1: +step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); @@ -291,16 +291,16 @@ step s2-create-table-1: create_distributed_table - -step s1-remove-node-2: + +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-2: <... completed> error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements -step s1-show-placements: +step s1-show-placements: SELECT nodename, nodeport FROM @@ -310,74 +310,74 @@ step s1-show-placements: ORDER BY nodename, nodeport; -nodename nodeport +nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 -step s2-select: +localhost 57637 +localhost 57637 +localhost 57638 +localhost 57638 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - - + + starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-2: +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node - -step s2-create-table-2: + +step s2-create-table-2: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table-2: <... completed> error in steps s1-commit s2-create-table-2: ERROR: replication_factor (2) exceeds number of worker nodes (1) -step s2-select: +step s2-select: SELECT * FROM dist_table; ERROR: relation "dist_table" does not exist master_remove_node - + starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-table-2: +step s2-create-table-2: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE dist_table (x int, y int); @@ -385,83 +385,83 @@ step s2-create-table-2: create_distributed_table - -step s1-remove-node-2: + +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-2: <... completed> error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements -step s2-select: +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - - + + starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-2: +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node - -step s2-create-append-table: + +step s2-create-append-table: SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x', 'append'); SELECT 1 FROM master_create_empty_shard('dist_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-append-table: <... completed> create_distributed_table - -?column? -1 -step s2-select: +?column? + +1 +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - + starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select -node_name node_port +node_name node_port -localhost 57637 -step s1-add-node-2: +localhost 57637 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-append-table: +step s2-create-append-table: SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x', 'append'); @@ -469,25 +469,25 @@ step s2-create-append-table: create_distributed_table - -?column? -1 -step s1-remove-node-2: +?column? + +1 +step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-2: <... completed> master_remove_node - -step s2-select: + +step s2-select: SELECT * FROM dist_table; -x y +x y master_remove_node - + diff --git a/src/test/regress/expected/isolation_data_migration.out b/src/test/regress/expected/isolation_data_migration.out index b77878c0f..fc72167db 100644 --- a/src/test/regress/expected/isolation_data_migration.out +++ b/src/test/regress/expected/isolation_data_migration.out @@ -1,151 +1,151 @@ Parsed test spec with 2 sessions starting permutation: s2-begin s2-copy s1-create_distributed_table s2-commit s2-select -step s2-begin: +step s2-begin: BEGIN; -step s2-copy: +step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); -step s2-commit: +step s2-commit: COMMIT; step s1-create_distributed_table: <... 
completed> create_distributed_table - -step s2-select: + +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello starting permutation: s1-begin s1-create_distributed_table s2-copy s1-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table - -step s2-copy: + +step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; -step s1-commit: +step s1-commit: COMMIT; step s2-copy: <... completed> -step s2-select: +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello starting permutation: s2-begin s2-insert s1-create_distributed_table s2-commit s2-select -step s2-begin: +step s2-begin: BEGIN; -step s2-insert: +step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); -step s2-commit: +step s2-commit: COMMIT; step s1-create_distributed_table: <... completed> create_distributed_table - -step s2-select: + +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello starting permutation: s1-begin s1-create_distributed_table s2-insert s1-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table - -step s2-insert: + +step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> -step s2-select: +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello starting permutation: s1-begin-serializable s2-copy s1-create_distributed_table s1-commit s2-select -step s1-begin-serializable: +step s1-begin-serializable: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; SELECT 1; -?column? +?column? -1 -step s2-copy: +1 +step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table - -step s1-commit: + +step s1-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello starting permutation: s1-begin-serializable s2-insert s1-create_distributed_table s1-commit s2-select -step s1-begin-serializable: +step s1-begin-serializable: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; SELECT 1; -?column? +?column? 
-1 -step s2-insert: +1 +step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table - -step s1-commit: + +step s1-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello diff --git a/src/test/regress/expected/isolation_ddl_vs_all.out b/src/test/regress/expected/isolation_ddl_vs_all.out index 31cfb46a5..fbf8677b0 100644 --- a/src/test/regress/expected/isolation_ddl_vs_all.out +++ b/src/test/regress/expected/isolation_ddl_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -20,12 +20,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); @@ -40,12 +40,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -66,12 +66,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -92,12 +92,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -118,12 +118,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; @@ -142,12 +142,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit 
s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -163,12 +163,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -184,12 +184,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -210,12 +210,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; @@ -234,12 +234,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -255,12 +255,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -277,12 +277,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -290,7 +290,7 @@ step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); @@ -300,12 +300,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 
'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -321,12 +321,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -338,7 +338,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers @@ -347,12 +347,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -362,7 +362,7 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -57344 +57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -371,12 +371,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -392,12 +392,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -409,7 +409,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -418,12 +418,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -433,7 +433,7 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -57344 +57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -442,12 +442,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -463,12 +463,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -480,7 +480,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -489,19 +489,19 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -512,12 +512,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -533,12 +533,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -547,7 +547,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> @@ -559,18 +559,18 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); @@ -580,12 +580,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM ddl_hash; @@ -599,12 +599,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -612,7 +612,7 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> @@ -623,19 +623,19 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -646,12 +646,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -667,12 +667,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -681,7 +681,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> @@ -693,19 +693,19 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -716,12 +716,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -737,12 +737,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -751,7 +751,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> @@ -763,4 +763,4 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_delete_vs_all.out b/src/test/regress/expected/isolation_delete_vs_all.out index c3b62be11..849c8ca7e 100644 --- a/src/test/regress/expected/isolation_delete_vs_all.out +++ b/src/test/regress/expected/isolation_delete_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -56,12 +56,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -71,9 +71,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -81,12 +81,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; @@ -97,9 +97,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -107,12 +107,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; @@ -120,9 +120,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY delete_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -130,12 +130,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -145,9 +145,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -155,12 +155,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -171,9 +171,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -181,12 +181,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -196,9 +196,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -206,12 +206,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -219,21 +219,21 @@ step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -245,20 +245,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -8 +8 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -268,17 +268,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -292,12 +292,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -307,9 +307,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -317,12 +317,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; @@ -333,9 +333,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -343,12 +343,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -358,9 +358,9 @@ step s1-commit: COMMIT; step s2-delete: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -368,12 +368,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -384,9 +384,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -394,12 +394,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -409,9 +409,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -419,34 +419,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size -57344 +57344 step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -455,15 +455,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('delete_hash', 'id'); create_distributed_table - + step s2-delete: DELETE FROM delete_hash 
WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -8 +8 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out index 403e948f6..22ca208ae 100644 --- a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out +++ b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out @@ -1,544 +1,544 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (1, 1)'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-insert: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 +1 1 +1 1 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -id value +2 20 +id value -2 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT 
run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select: + +step s2-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-insert-select: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 -2 2 +1 1 +1 1 +2 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=2 WHERE id=1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step 
s2-update: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -3 10 -id value +2 20 +3 10 +id value -1 2 -2 2 +1 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 1, 1''WITH CSV'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-copy: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 +1 1 +1 1 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE dist_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -3 10 -id value +2 20 +3 10 +id value restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-udpate: + +step s2-select-for-udpate: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-udpate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -id value +2 20 +id value -2 2 +2 2 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_distributed_deadlock_detection.out b/src/test/regress/expected/isolation_distributed_deadlock_detection.out index 182ad05c1..ad08e5ff7 100644 --- a/src/test/regress/expected/isolation_distributed_deadlock_detection.out +++ b/src/test/regress/expected/isolation_distributed_deadlock_detection.out @@ -1,958 +1,958 @@ Parsed test spec with 7 sessions starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1-rep-2 s2-update-2-rep-2 s2-update-1-rep-2 deadlock-checker-call s1-update-2-rep-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1-rep-2: +step s1-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; -step s2-update-2-rep-2: +step s2-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; -step s2-update-1-rep-2: +step s2-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2-rep-2: +f +step s1-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1-rep-2: <... completed> step s1-update-2-rep-2: <... 
completed> error in steps deadlock-checker-call s2-update-1-rep-2 s1-update-2-rep-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-set-2pc s2-set-2pc s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-set-2pc: +step s1-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; -step s2-set-2pc: +step s2-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-upsert-select-all deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s1-update-2: +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-upsert-select-all: +f +step s2-upsert-select-all: INSERT INTO deadlock_detection_test SELECT * FROM deadlock_detection_test ON CONFLICT(user_id) DO UPDATE SET some_val = deadlock_detection_test.some_val + 5 RETURNING *; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-2: <... completed> step s2-upsert-select-all: <... 
completed> error in steps deadlock-checker-call s1-update-2 s2-upsert-select-all: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-ddl deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s1-update-2: +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-ddl: +f +step s2-ddl: ALTER TABLE deadlock_detection_test ADD COLUMN test_col INT; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-2: <... completed> step s2-ddl: <... completed> error in steps deadlock-checker-call s1-update-2 s2-ddl: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-insert-dist-10 s2-insert-local-10 s2-insert-dist-10 s1-insert-local-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-insert-dist-10: +step s1-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); -step s2-insert-local-10: +step s2-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); -step s2-insert-dist-10: +step s2-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); -step s1-insert-local-10: +step s1-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-insert-dist-10: <... completed> step s1-insert-local-10: <... completed> error in steps deadlock-checker-call s2-insert-dist-10 s1-insert-local-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-insert-ref-11 s2-insert-ref-11 s1-insert-ref-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-ref-10: +step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step s1-insert-ref-11: +step s1-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); -step s2-insert-ref-11: +step s2-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); -step s1-insert-ref-10: +step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-insert-ref-11: <... completed> step s1-insert-ref-10: <... 
completed> error in steps deadlock-checker-call s2-insert-ref-11 s1-insert-ref-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-update-1 deadlock-checker-call s2-update-1 s1-insert-ref-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-ref-10: +step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-1: +f +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step s1-insert-ref-10: +step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-insert-ref-10: <... completed> error in steps deadlock-checker-call s2-update-1 s1-insert-ref-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s1-update-1 s2-update-2 s3-update-3 deadlock-checker-call s1-update-2 s2-update-3 s3-update-1 deadlock-checker-call s3-commit s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-1: +step s3-update-1: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> step s3-update-1: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-1: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-update-2: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s2-update-1 s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s2-update-3 deadlock-checker-call s3-commit s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-3: +f +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-update-1: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s4-update-4 s2-update-3 deadlock-checker-call s3-commit s2-commit s1-commit s4-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s4-update-4: +f +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... 
completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s4-update-1 s1-update-1 deadlock-checker-call s2-update-2 s3-update-3 s2-update-3 s3-update-2 deadlock-checker-call s3-commit s2-commit s4-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s4-update-1: +step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-2: +f +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> step s3-update-2: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; step s1-update-1: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s4-update-4 s2-update-2 s3-update-3 s3-update-2 s4-update-1 s1-update-4 deadlock-checker-call s1-commit s4-commit s2-update-3 deadlock-checker-call s2-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s4-update-1: +step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s4-update-1: <... completed> step s1-update-4: <... 
completed> error in steps deadlock-checker-call s4-update-1 s1-update-4: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s2-commit: +step s2-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s1-update-1 s5-update-5 s3-update-2 s2-update-3 s4-update-4 s3-update-4 deadlock-checker-call s6-update-6 s4-update-6 s1-update-5 s5-update-1 deadlock-checker-call s1-commit s5-commit s6-commit s4-commit s3-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s3-update-4: +step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s6-update-6: +f +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s4-update-6: +step s4-update-6: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 6; -step s1-update-5: +step s1-update-5: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 5; -step s5-update-1: +step s5-update-1: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-5: <... completed> step s5-update-1: <... completed> error in steps deadlock-checker-call s1-update-5 s5-update-1: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s5-commit: +step s5-commit: COMMIT; -step s6-commit: +step s6-commit: COMMIT; step s4-update-6: <... completed> -step s4-commit: +step s4-commit: COMMIT; step s3-update-4: <... 
completed> -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s6-update-6 s5-update-5 s5-update-6 s4-update-4 s1-update-4 s4-update-5 deadlock-checker-call s2-update-3 s3-update-2 s2-update-2 s3-update-3 deadlock-checker-call s6-commit s5-commit s4-commit s1-commit s3-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s6-update-6: +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s5-update-6: +step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step s4-update-5: +step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-3: +f +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-2: <... completed> step s3-update-3: <... completed> error in steps deadlock-checker-call s2-update-2 s3-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s6-commit: +step s6-commit: COMMIT; step s5-update-6: <... completed> -step s5-commit: +step s5-commit: COMMIT; step s4-update-5: <... completed> -step s4-commit: +step s4-commit: COMMIT; step s1-update-4: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s5-update-5 s3-update-2 s2-update-2 s4-update-4 s3-update-4 s4-update-5 s1-update-4 deadlock-checker-call s6-update-6 s5-update-6 s6-update-5 deadlock-checker-call s5-commit s6-commit s4-commit s3-commit s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s3-update-4: +step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; -step s4-update-5: +step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s6-update-6: +f +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s5-update-6: +step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; -step s6-update-5: +step s6-update-5: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 5; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s5-update-6: <... completed> step s6-update-5: <... completed> error in steps deadlock-checker-call s5-update-6 s6-update-5: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s5-commit: +step s5-commit: COMMIT; step s4-update-5: <... completed> -step s6-commit: +step s6-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; step s3-update-4: <... completed> -step s3-commit: +step s3-commit: COMMIT; step s2-update-2: <... completed> step s1-update-4: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s1-update-1 s3-update-3 s2-update-4 s2-update-3 s4-update-2 s5-random-adv-lock s4-random-adv-lock s3-update-1 s1-update-2-4 deadlock-checker-call deadlock-checker-call s5-commit s4-commit s2-commit s1-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s2-update-4: +step s2-update-4: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 4; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s4-update-2: +step s4-update-2: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 2; -step s5-random-adv-lock: +step s5-random-adv-lock: SELECT pg_advisory_xact_lock(8765); pg_advisory_xact_lock - -step s4-random-adv-lock: + +step s4-random-adv-lock: SELECT pg_advisory_xact_lock(8765); -step s3-update-1: +step s3-update-1: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; -step s1-update-2-4: +step s1-update-2-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2 OR user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> error in steps deadlock-checker-call s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s5-commit: +f +step s5-commit: COMMIT; step s4-random-adv-lock: <... completed> pg_advisory_xact_lock - -step s4-commit: + +step s4-commit: COMMIT; step s1-update-2-4: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; step s3-update-1: <... 
completed> -step s3-commit: +step s3-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out b/src/test/regress/expected/isolation_distributed_transaction_id.out index cd6e9f130..8a9bfe565 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -1,63 +1,63 @@ Parsed test spec with 3 sessions starting permutation: s1-begin s1-assign-transaction-id s1-get-all-transactions s2-begin s2-assign-transaction-id s2-get-all-transactions s3-begin s3-assign-transaction-id s3-get-all-transactions s1-commit s2-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-assign-transaction-id: +step s1-assign-transaction-id: SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id - -step s1-get-all-transactions: + +step s1-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 1 1 Wed Dec 31 16:00:00 2014 PST -step s2-begin: +step s2-begin: BEGIN; -step s2-assign-transaction-id: +step s2-assign-transaction-id: SELECT assign_distributed_transaction_id(2, 2, '2015-01-02 00:00:00+0'); assign_distributed_transaction_id - -step s2-get-all-transactions: + +step s2-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 2 2 Thu Jan 01 16:00:00 2015 PST -step s3-begin: +step s3-begin: BEGIN; -step s3-assign-transaction-id: +step s3-assign-transaction-id: SELECT assign_distributed_transaction_id(3, 3, '2015-01-03 00:00:00+0'); assign_distributed_transaction_id - -step s3-get-all-transactions: + +step s3-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 3 3 Fri Jan 02 16:00:00 2015 PST -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; starting permutation: s1-create-table s1-begin s1-insert s1-verify-current-xact-is-on-worker s1-commit -step s1-create-table: +step s1-create-table: -- some tests also use distributed table CREATE TABLE distributed_transaction_id_table(some_value int, other_value int); SET citus.shard_count TO 4; @@ -65,14 +65,14 @@ step s1-create-table: create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO distributed_transaction_id_table VALUES (1, 1); -step s1-verify-current-xact-is-on-worker: +step s1-verify-current-xact-is-on-worker: SELECT remote.nodeport, remote.result = row(xact.initiator_node_identifier, xact.transaction_number)::text AS xact_exists @@ -84,39 +84,39 @@ step s1-verify-current-xact-is-on-worker: $$) as remote ORDER BY remote.nodeport ASC; -nodeport xact_exists +nodeport xact_exists -57637 t -57638 t -step s1-commit: +57637 t +57638 t +step s1-commit: COMMIT; starting permutation: s1-begin s1-assign-transaction-id s1-has-transaction-number s2-vacuum s1-has-transaction-number s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-assign-transaction-id: +step s1-assign-transaction-id: SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id - -step 
s1-has-transaction-number: + +step s1-has-transaction-number: SELECT transaction_number > 0 FROM get_current_transaction_id(); -?column? +?column? -t -step s2-vacuum: +t +step s2-vacuum: VACUUM FULL pg_dist_partition; -step s1-has-transaction-number: +step s1-has-transaction-number: SELECT transaction_number > 0 FROM get_current_transaction_id(); -?column? +?column? -t -step s1-commit: +t +step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_dml_vs_repair.out b/src/test/regress/expected/isolation_dml_vs_repair.out index 193a28897..499632c66 100644 --- a/src/test/regress/expected/isolation_dml_vs_repair.out +++ b/src/test/regress/expected/isolation_dml_vs_repair.out @@ -3,202 +3,202 @@ Parsed test spec with 2 sessions starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-begin: +step s1-begin: BEGIN; -step s1-insertone: +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement - + starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit master_create_worker_shards - -step s1-insertone: + +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-invalidate-57637: +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-begin: +step s1-begin: BEGIN; -step s1-insertall: +step s1-insertall: INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement - + starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-insertone: + +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-commit: +step s2-commit: COMMIT; step s1-insertone: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -step s2-invalidate-57637: +1 1 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 +1 1 starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-prepared-insertone: +step s1-prepared-insertone: EXECUTE insertone; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-prepared-insertone: + +step s1-prepared-insertone: EXECUTE insertone; -step s2-commit: +step s2-commit: COMMIT; step s1-prepared-insertone: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 1 -step s2-invalidate-57637: +1 1 +1 1 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 1 +1 1 +1 1 starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-insertone: +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s1-prepared-insertall: +step s1-prepared-insertall: EXECUTE insertall; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-prepared-insertall: + +step s1-prepared-insertall: EXECUTE insertall; -step s2-commit: +step s2-commit: COMMIT; step s1-prepared-insertall: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 2 -1 2 -1 3 -step s2-invalidate-57637: +1 1 +1 2 +1 2 +1 3 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 2 -1 2 -1 3 +1 1 +1 2 +1 2 +1 3 diff --git a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out index 499ca18f2..10c988f33 100644 --- a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out +++ b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out @@ -1,218 +1,218 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES(5, 55)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter: + +step s2-alter: ALTER TABLE dist_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -6 +6 restore_isolation_tester_func - + starting permutation: s1-begin s1-index s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit s2-commit-worker s2-stop-connection -step s1-begin: +step s1-begin: BEGIN; -step s1-index: +step s1-index: CREATE INDEX dist_table_index ON dist_table (id); -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit: + +step s1-commit: COMMIT; -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-update: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY dist_table_index_conc ON dist_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_drop_shards.out b/src/test/regress/expected/isolation_drop_shards.out index d007f502c..4f0a91d48 100644 --- a/src/test/regress/expected/isolation_drop_shards.out +++ b/src/test/regress/expected/isolation_drop_shards.out @@ -1,242 +1,242 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-drop-all-shards s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-truncate: +16 +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-drop-all-shards s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-apply-delete-command: +16 +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-drop-all-shards s2-drop-all-shards s1-commit -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-drop-all-shards: +16 +step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards -0 +0 starting permutation: s1-begin s1-drop-all-shards s2-select s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-select: +16 +step s2-select: SELECT * FROM append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-select: <... completed> -test_id data +test_id data starting permutation: s1-begin s1-apply-delete-command s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-truncate: +16 +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-apply-delete-command s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-apply-delete-command: +16 +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-apply-delete-command s2-drop-all-shards s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-drop-all-shards: +16 +step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards -0 +0 starting permutation: s1-begin s1-truncate s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-truncate: +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-truncate s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-apply-delete-command: +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-truncate s2-drop-all-shards s1-commit -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-drop-all-shards: +step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards -0 +0 starting permutation: s1-begin s1-truncate s2-select s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-select: +step s2-select: SELECT * FROM append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-select: <... completed> -test_id data +test_id data diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out index 01a661878..06c50699c 100644 --- a/src/test/regress/expected/isolation_drop_vs_all.out +++ b/src/test/regress/expected/isolation_drop_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -17,12 +17,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -41,12 +41,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; @@ -66,12 +66,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; @@ -88,12 +88,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -112,12 +112,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH 
CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -137,12 +137,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -161,12 +161,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -180,12 +180,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -201,12 +201,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -224,12 +224,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; @@ -248,12 +248,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -271,12 +271,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -295,12 
+295,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -318,19 +318,19 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('drop_hash'); citus_total_relation_size -57344 +57344 step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -338,12 +338,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -352,7 +352,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); create_distributed_table - + step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> @@ -361,4 +361,4 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_dump_global_wait_edges.out b/src/test/regress/expected/isolation_dump_global_wait_edges.out index 74e699f61..037be6803 100644 --- a/src/test/regress/expected/isolation_dump_global_wait_edges.out +++ b/src/test/regress/expected/isolation_dump_global_wait_edges.out @@ -1,19 +1,19 @@ Parsed test spec with 4 sessions starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update: +step s1-update: UPDATE distributed_table SET y = 1 WHERE x = 1; -step s2-update: +step s2-update: UPDATE distributed_table SET y = 2 WHERE x = 1; -step detector-dump-wait-edges: +step detector-dump-wait-edges: SELECT waiting_transaction_num, blocking_transaction_num, @@ -28,39 +28,39 @@ step detector-dump-wait-edges: waiting_transaction_numblocking_transaction_numblocking_transaction_waiting -357 356 f +357 356 f transactionnumberwaitingtransactionnumbers -356 -357 356 -step s1-abort: +356 +357 356 +step s1-abort: ABORT; step s2-update: <... 
completed> -step s2-abort: +step s2-abort: ABORT; starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-update: +step s1-update: UPDATE distributed_table SET y = 1 WHERE x = 1; -step s2-update: +step s2-update: UPDATE distributed_table SET y = 2 WHERE x = 1; -step s3-update: +step s3-update: UPDATE distributed_table SET y = 3 WHERE x = 1; -step detector-dump-wait-edges: +step detector-dump-wait-edges: SELECT waiting_transaction_num, blocking_transaction_num, @@ -75,22 +75,22 @@ step detector-dump-wait-edges: waiting_transaction_numblocking_transaction_numblocking_transaction_waiting -361 360 f -362 360 f -362 361 t +361 360 f +362 360 f +362 361 t transactionnumberwaitingtransactionnumbers -360 -361 360 -362 360,361 -step s1-abort: +360 +361 360 +362 360,361 +step s1-abort: ABORT; step s2-update: <... completed> -step s2-abort: +step s2-abort: ABORT; step s3-update: <... completed> -step s3-abort: +step s3-abort: ABORT; diff --git a/src/test/regress/expected/isolation_dump_local_wait_edges.out b/src/test/regress/expected/isolation_dump_local_wait_edges.out index 1ecaccf0f..73f770dba 100644 --- a/src/test/regress/expected/isolation_dump_local_wait_edges.out +++ b/src/test/regress/expected/isolation_dump_local_wait_edges.out @@ -1,27 +1,27 @@ Parsed test spec with 4 sessions starting permutation: dist11-begin dist13-begin dist11-update dist13-update detector-dump-wait-edges dist11-abort dist13-abort -step dist11-begin: +step dist11-begin: BEGIN; SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id - -step dist13-begin: + +step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id - -step dist11-update: + +step dist11-update: UPDATE local_table SET y = 1 WHERE x = 1; -step dist13-update: +step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; -step detector-dump-wait-edges: +step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, @@ -37,33 +37,33 @@ step detector-dump-wait-edges: waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting -13 1 11 1 f -step dist11-abort: +13 1 11 1 f +step dist11-abort: ABORT; step dist13-update: <... completed> -step dist13-abort: +step dist13-abort: ABORT; starting permutation: local-begin dist13-begin local-update dist13-update detector-dump-wait-edges local-abort dist13-abort -step local-begin: +step local-begin: BEGIN; -step dist13-begin: +step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id - -step local-update: + +step local-update: UPDATE local_table SET y = 2 WHERE x = 1; -step dist13-update: +step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; -step detector-dump-wait-edges: +step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, @@ -79,43 +79,43 @@ step detector-dump-wait-edges: waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting -13 1 0 f -step local-abort: +13 1 0 f +step local-abort: ABORT; step dist13-update: <... 
completed> -step dist13-abort: +step dist13-abort: ABORT; starting permutation: dist11-begin local-begin dist13-begin dist11-update local-update dist13-update detector-dump-wait-edges dist11-abort local-abort dist13-abort -step dist11-begin: +step dist11-begin: BEGIN; SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id - -step local-begin: + +step local-begin: BEGIN; -step dist13-begin: +step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id - -step dist11-update: + +step dist11-update: UPDATE local_table SET y = 1 WHERE x = 1; -step local-update: +step local-update: UPDATE local_table SET y = 2 WHERE x = 1; -step dist13-update: +step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; -step detector-dump-wait-edges: +step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, @@ -131,16 +131,16 @@ step detector-dump-wait-edges: waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting -0 11 1 f -13 1 0 t -step dist11-abort: +0 11 1 f +13 1 0 t +step dist11-abort: ABORT; step local-update: <... completed> -step local-abort: +step local-abort: ABORT; step dist13-update: <... completed> -step dist13-abort: +step dist13-abort: ABORT; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index d93c3d60c..cf12b2d2e 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -1,10 +1,10 @@ Parsed test spec with 3 sessions starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -19,62 +19,62 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-public-schema: +1 +step s2-public-schema: SET search_path TO public; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... 
completed> create_distributed_table - -step s2-print-distributed-objects: + +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -89,37 +89,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -134,68 +134,68 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-public-schema: +1 +step s2-public-schema: SET search_path TO public; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -210,37 +210,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects -?column? +?column? 
-1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -255,45 +255,45 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-public-schema: +step s2-public-schema: SET search_path TO public; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; @@ -302,21 +302,21 @@ step s2-create-table: create_distributed_table - -step s1-add-worker: + +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-worker: <... completed> -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -331,37 +331,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -376,63 +376,63 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s2-create-schema: +1 +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... completed> create_distributed_table - -step s2-print-distributed-objects: + +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -448,37 +448,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -493,69 +493,69 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-schema: +1 +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... 
completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -571,37 +571,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -616,46 +616,46 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-schema: +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; @@ -664,21 +664,21 @@ step s2-create-table: create_distributed_table - -step s1-add-worker: + +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-worker: <... completed> -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -694,37 +694,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s3-begin s1-add-worker s2-create-table s3-use-schema s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects -?column? +?column? 
-1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -739,88 +739,88 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s2-create-schema: + +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-table: +1 +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s3-use-schema: +step s3-use-schema: SET search_path TO myschema; -step s3-create-table: +step s3-create-table: CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t2', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; step s3-create-table: <... completed> create_distributed_table - -step s3-commit: + +step s3-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -836,37 +836,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -881,55 +881,55 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? 
-1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-add-worker: + +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-schema: +1 +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s3-use-schema: +step s3-use-schema: SET search_path TO myschema; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; @@ -938,24 +938,24 @@ step s2-create-table: create_distributed_table - -step s3-create-table: + +step s3-create-table: CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t2', 'a'); -step s2-commit: +step s2-commit: COMMIT; step s3-create-table: <... completed> create_distributed_table - -step s3-commit: + +step s3-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -971,37 +971,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1016,89 +1016,89 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s2-create-schema: +1 +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s3-create-schema2: +step s3-create-schema2: CREATE SCHEMA myschema2; SET search_path TO myschema2; -step s2-create-table: +step s2-create-table: CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s3-create-table: +step s3-create-table: CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t2', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-table: <... completed> create_distributed_table - + step s3-create-table: <... completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1115,37 +1115,37 @@ pg_identify_object_as_address (schema,{myschema},{}) (schema,{myschema2},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1160,55 +1160,55 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-public-schema: +1 +step s2-public-schema: SET search_path TO public; -step s2-create-type: +step s2-create-type: CREATE TYPE tt1 AS (a int, b int); -step s1-commit: +step s1-commit: COMMIT; step s2-create-type: <... 
completed> -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1224,37 +1224,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (type,{public.tt1},{}) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1269,54 +1269,54 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-public-schema: +step s2-public-schema: SET search_path TO public; -step s2-create-type: +step s2-create-type: CREATE TYPE tt1 AS (a int, b int); -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1332,37 +1332,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (type,{public.tt1},{}) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1377,49 +1377,49 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? 
-1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-schema: +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-create-type: +step s2-create-type: CREATE TYPE tt1 AS (a int, b int); -step s2-create-table-with-type: +step s2-create-table-with-type: CREATE TABLE t1 (a int, b tt1); -- session needs to have replication factor set to 1, can't do in setup SET citus.replication_model TO 'streaming'; @@ -1428,21 +1428,21 @@ step s2-create-table-with-type: create_distributed_table - -step s1-add-worker: + +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-worker: <... completed> -?column? +?column? -1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s2-print-distributed-objects: +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1459,37 +1459,37 @@ pg_identify_object_as_address (schema,{myschema},{}) (type,{myschema.tt1},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1504,71 +1504,71 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-public-schema: +1 +step s2-public-schema: SET search_path TO public; -step s2-distribute-function: +step s2-distribute-function: CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL; SELECT create_distributed_function('add(INT,INT)', '$1'); -step s1-commit: +step s1-commit: COMMIT; step s2-distribute-function: <... 
completed> create_distributed_function - -step s2-begin: + +step s2-begin: BEGIN; -step s2-commit: +step s2-commit: COMMIT; -step s3-wait-for-metadata-sync: +step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); wait_until_metadata_sync - -step s2-print-distributed-objects: + +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1584,37 +1584,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (function,"{public,add}","{integer,integer}") -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) master_remove_node - - + + starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1629,76 +1629,76 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s1-begin: + +step s1-begin: BEGIN; -step s2-public-schema: +step s2-public-schema: SET search_path TO public; -step s2-distribute-function: +step s2-distribute-function: CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL; SELECT create_distributed_function('add(INT,INT)', '$1'); create_distributed_function - -step s2-begin: + +step s2-begin: BEGIN; -step s2-commit: +step s2-commit: COMMIT; -step s3-wait-for-metadata-sync: +step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); wait_until_metadata_sync - -step s1-add-worker: + +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s3-wait-for-metadata-sync: +step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); wait_until_metadata_sync - -step s2-print-distributed-objects: + +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1714,37 +1714,37 @@ step s2-print-distributed-objects: pg_identify_object_as_address (function,"{public,add}","{integer,integer}") -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) master_remove_node - - + + starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects -?column? +?column? -1 -step s1-print-distributed-objects: +1 +step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; @@ -1759,77 +1759,77 @@ step s1-print-distributed-objects: SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); SELECT master_remove_node('localhost', 57638); -?column? +?column? -1 +1 pg_identify_object_as_address -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) master_remove_node - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-schema: +step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-distribute-function: +step s2-distribute-function: CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL; SELECT create_distributed_function('add(INT,INT)', '$1'); create_distributed_function - -step s2-commit: + +step s2-commit: COMMIT; -step s3-wait-for-metadata-sync: +step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); wait_until_metadata_sync - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-worker: +step s1-add-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s1-commit: +1 +step s1-commit: COMMIT; -step s3-wait-for-metadata-sync: +step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); wait_until_metadata_sync - -step s2-print-distributed-objects: + +step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -1846,28 +1846,28 @@ pg_identify_object_as_address (function,"{myschema,add}","{integer,integer}") (schema,{myschema},{}) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) -count +count -0 +0 run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) -count +count -1 +1 run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out index 751eee47f..7e2176b71 100644 --- a/src/test/regress/expected/isolation_extension_commands.out +++ b/src/test/regress/expected/isolation_extension_commands.out @@ -1,35 +1,35 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-1 s2-create-extension-version-11 s1-commit s1-print -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-version-11: +1 +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s1-commit: +step s1-commit: COMMIT; step s2-create-extension-version-11: <... completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -1 -extname extversion nspname +1 +extname extversion nspname -seg 1.1 public +seg 1.1 public run_command_on_workers (localhost,57637,t,seg) @@ -44,39 +44,39 @@ run_command_on_workers (localhost,57638,t,public) master_remove_node - - + + starting permutation: s1-begin s1-add-node-1 s2-alter-extension-update-to-version-12 s1-commit s1-print -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-alter-extension-update-to-version-12: +1 +step s2-alter-extension-update-to-version-12: ALTER extension seg update to "1.2"; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-extension-update-to-version-12: <... 
completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -1 -extname extversion nspname +1 +extname extversion nspname -seg 1.2 public +seg 1.2 public run_command_on_workers (localhost,57637,t,seg) @@ -91,43 +91,43 @@ run_command_on_workers (localhost,57638,t,public) master_remove_node - - + + starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-drop-extension s1-commit s1-print -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-drop-extension: +1 +step s2-drop-extension: drop extension seg; -step s1-commit: +step s1-commit: COMMIT; step s2-drop-extension: <... completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -0 -extname extversion nspname +0 +extname extversion nspname run_command_on_workers @@ -140,38 +140,38 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - + starting permutation: s1-begin s1-add-node-1 s2-create-extension-with-schema1 s1-commit s1-print -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-with-schema1: +1 +step s2-create-extension-with-schema1: CREATE extension seg with schema schema1; -step s1-commit: +step s1-commit: COMMIT; step s2-create-extension-with-schema1: <... 
completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -2 -extname extversion nspname +2 +extname extversion nspname -seg 1.3 schema1 +seg 1.3 schema1 run_command_on_workers (localhost,57637,t,seg) @@ -186,37 +186,37 @@ run_command_on_workers (localhost,57638,t,schema1) master_remove_node - - + + starting permutation: s1-begin s1-add-node-1 s2-drop-extension s1-commit s1-print -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-drop-extension: +1 +step s2-drop-extension: drop extension seg; -step s1-commit: +step s1-commit: COMMIT; step s2-drop-extension: <... completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -1 -extname extversion nspname +1 +extname extversion nspname run_command_on_workers @@ -232,48 +232,48 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + starting permutation: s1-add-node-1 s1-create-extension-with-schema2 s1-begin s1-remove-node-1 s2-alter-extension-set-schema3 s1-commit s1-print -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-create-extension-with-schema2: +1 +step s1-create-extension-with-schema2: CREATE extension seg with schema schema2; -step s1-begin: +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-alter-extension-set-schema3: +1 +step s2-alter-extension-set-schema3: alter extension seg set schema schema3; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-extension-set-schema3: <... 
completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -4 -extname extversion nspname +4 +extname extversion nspname -seg 1.3 schema3 +seg 1.3 schema3 run_command_on_workers (localhost,57638,t,seg) @@ -285,47 +285,47 @@ run_command_on_workers (localhost,57638,t,schema3) master_remove_node - + starting permutation: s1-add-node-1 s2-drop-extension s1-begin s1-remove-node-1 s2-create-extension-with-schema1 s1-commit s1-print -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-drop-extension: +1 +step s2-drop-extension: drop extension seg; -step s1-begin: +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-with-schema1: +1 +step s2-create-extension-with-schema1: CREATE extension seg with schema schema1; -step s1-commit: +step s1-commit: COMMIT; step s2-create-extension-with-schema1: <... completed> -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -4 -extname extversion nspname +4 +extname extversion nspname -seg 1.3 schema1 +seg 1.3 schema1 run_command_on_workers (localhost,57638,t,seg) @@ -337,52 +337,52 @@ run_command_on_workers (localhost,57638,t,schema1) master_remove_node - + starting permutation: s2-add-node-1 s2-drop-extension s2-remove-node-1 s2-begin s2-create-extension-version-11 s1-add-node-1 s2-commit s1-print -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-drop-extension: +1 +step s2-drop-extension: drop extension seg; -step s2-remove-node-1: +step s2-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-extension-version-11: +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? 
-1 -step s2-commit: +1 +step s2-commit: COMMIT; -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname -seg 1.1 public +seg 1.1 public run_command_on_workers (localhost,57637,t,"") @@ -397,57 +397,57 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-alter-extension-update-to-version-12 s1-add-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-version-11: +1 +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s2-remove-node-1: +step s2-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-alter-extension-update-to-version-12: +step s2-alter-extension-update-to-version-12: ALTER extension seg update to "1.2"; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-add-node-1: <... completed> -?column? +?column? -1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -4 -extname extversion nspname +4 +extname extversion nspname -seg 1.2 public +seg 1.2 public run_command_on_workers (localhost,57637,t,seg) @@ -462,43 +462,43 @@ run_command_on_workers (localhost,57638,t,public) master_remove_node - - + + starting permutation: s2-add-node-1 s2-begin s2-drop-extension s1-remove-node-1 s2-commit s1-print -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-1: <... completed> -?column? +?column? 
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname run_command_on_workers @@ -511,37 +511,37 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - + starting permutation: s2-begin s2-create-extension-with-schema1 s1-add-node-1 s2-commit s1-print -step s2-begin: +step s2-begin: BEGIN; -step s2-create-extension-with-schema1: +step s2-create-extension-with-schema1: CREATE extension seg with schema schema1; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-commit: +1 +step s2-commit: COMMIT; -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname -seg 1.3 schema1 +seg 1.3 schema1 run_command_on_workers (localhost,57637,t,"") @@ -556,51 +556,51 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-with-schema2 s2-begin s2-alter-extension-version-13 s1-remove-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-with-schema2: +1 +step s2-create-extension-with-schema2: CREATE extension seg with schema schema2; -step s2-begin: +step s2-begin: BEGIN; -step s2-alter-extension-version-13: +step s2-alter-extension-version-13: ALTER extension seg update to "1.3"; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-1: <... completed> -?column? +?column? 
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -4 -extname extversion nspname +4 +extname extversion nspname -seg 1.3 schema2 +seg 1.3 schema2 run_command_on_workers (localhost,57638,t,seg) @@ -612,46 +612,46 @@ run_command_on_workers (localhost,57638,t,schema2) master_remove_node - + starting permutation: s2-drop-extension s2-add-node-1 s2-begin s2-create-extension-version-11 s1-remove-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-extension-version-11: +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-commit: +1 +step s2-commit: COMMIT; -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname -seg 1.1 public +seg 1.1 public run_command_on_workers (localhost,57638,t,"") @@ -663,54 +663,54 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - + starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-drop-extension s1-add-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-version-11: +1 +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s2-remove-node-1: +step s2-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-add-node-1: <... completed> -?column? +?column? 
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname run_command_on_workers @@ -726,5 +726,5 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index 3d05d6696..3bc0437b0 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -5,79 +5,79 @@ run_command_on_workers (localhost,57637,t,"GRANT ROLE") (localhost,57638,t,"GRANT ROLE") -step s1-grant: +step s1-grant: GRANT ALL ON test_table TO test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); GRANT ALL ON test_table TO test_user_2; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); -bool_and +bool_and -t -bool_and +t +bool_and -t -step s1-begin-insert: +t +step s1-begin-insert: BEGIN; SET ROLE test_user_1; INSERT INTO test_table VALUES (100, 100); -step s2-begin-insert: +step s2-begin-insert: BEGIN; SET ROLE test_user_2; INSERT INTO test_table VALUES (200, 200); -step s3-as-admin: +step s3-as-admin: -- Admin should be able to see all transactions SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -4 -step s3-as-user-1: +4 +step s3-as-user-1: -- User should only be able to see its own transactions SET ROLE test_user_1; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -1 -count +1 +count -2 -step s3-as-readonly: +2 +step s3-as-readonly: -- Other user should not see transactions SET ROLE test_readonly; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -0 -count +0 +count -0 -step s3-as-monitor: +0 +step s3-as-monitor: -- Monitor should see all transactions SET ROLE test_monitor; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -4 -step s1-commit: +4 +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; run_command_on_workers diff --git a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out index 1547304b0..56db49f47 100644 --- a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out +++ b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out @@ -1,764 +1,764 @@ Parsed test spec with 4 sessions starting permutation: s1-begin s1-update-ref-table-from-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit s2-commit-worker s2-stop-connection -step 
s1-begin: +step s1-begin: BEGIN; -step s1-update-ref-table-from-coordinator: +step s1-update-ref-table-from-coordinator: UPDATE ref_table SET value_1 = 15; -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 UPDATE ref_table SET value_1 = 15; -localhost coordinator_host57638 57636 -step s1-commit: +localhost coordinator_host57638 57636 +step s1-commit: COMMIT; step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-ref-table: + +step s1-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET 
value_1 = 12 WHERE user_id = 1localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET value_1 = 12 WHERE user_id = 1localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s2-start-session-level-connection s2-begin-on-worker s2-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-dist-table: + +step s1-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-dist-table: + +step s2-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-dist-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete-from-ref-table: + +step s1-delete-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91)localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91)localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step 
s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + 
+step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy-to-ref-table: + +step s2-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, 
waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s3-select-distributed-waiting-queries s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost 57636 57638 -step s2-commit-worker: +INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost 57636 57638 +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-begin s1-update-on-the-coordinator s2-update-on-the-coordinator s3-select-distributed-waiting-queries s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update-on-the-coordinator: +step s1-update-on-the-coordinator: UPDATE tt1 SET value_1 = 4; -step s2-update-on-the-coordinator: +step s2-update-on-the-coordinator: UPDATE tt1 SET value_1 = 4; -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port @@ -767,83 +767,83 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_ UPDATE tt1 SET value_1 = 4; UPDATE tt1 SET value_1 = 4; -coordinator_hostcoordinator_host57636 57636 -step s1-commit: +coordinator_hostcoordinator_host57636 57636 +step s1-commit: COMMIT; step s2-update-on-the-coordinator: <... completed> restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s4-start-session-level-connection s4-begin-on-worker s4-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s4-commit-worker s1-stop-connection s4-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-dist-table: + +step s1-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node - -step s4-start-session-level-connection: + +step s4-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s4-begin-on-worker: + +step s4-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s4-update-dist-table: + +step s4-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57637 57637 -step s1-commit-worker: +UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57637 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s4-update-dist-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s4-commit-worker: + +step s4-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s4-stop-connection: + +step s4-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_hash_copy_vs_all.out b/src/test/regress/expected/isolation_hash_copy_vs_all.out index 4566e34b4..b3a90ec0b 100644 --- a/src/test/regress/expected/isolation_hash_copy_vs_all.out +++ b/src/test/regress/expected/isolation_hash_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM hash_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 
b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM hash_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE hash_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX hash_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY hash_copy_index step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -224,7 +224,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -244,7 +244,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE hash_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -265,7 +265,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -285,38 +285,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting 
permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: DELETE FROM hash_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -325,16 +325,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -345,17 +345,17 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM hash_copy; +count + +15 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -363,7 +363,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -371,15 +371,15 @@ step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s2-update: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -387,7 +387,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -395,15 +395,15 @@ step s2-delete: DELETE FROM hash_copy WHERE id = 1; step s1-commit: COMMIT; step s2-delete: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -411,7 +411,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -419,15 +419,15 @@ step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -20 +20 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -435,7 +435,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -443,132 +443,132 @@ step s2-master-modify-multiple-shards: DELETE FROM hash_copy; step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM hash_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit 
s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM hash_copy WHERE id = 1; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE hash_copy; @@ -576,14 +576,14 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE hash_copy; @@ -597,7 +597,7 @@ ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); @@ -605,9 +605,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -617,7 +617,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; @@ -626,9 +626,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -638,7 +638,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; @@ -647,9 +647,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -659,7 +659,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -668,9 +668,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -680,7 +680,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; @@ -688,9 +688,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -700,57 +700,57 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size -57344 +57344 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); master_drop_all_shards -4 +4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -758,11 +758,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table - + step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_insert_select_conflict.out b/src/test/regress/expected/isolation_insert_select_conflict.out index 55fd21f6c..0c8a69760 100644 --- a/src/test/regress/expected/isolation_insert_select_conflict.out +++ b/src/test/regress/expected/isolation_insert_select_conflict.out @@ -3,299 +3,299 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-update: +step s2-update: UPDATE target_table SET col_2 = 5; -step s1-commit: +step s1-commit: COMMIT; step s2-update: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-delete s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-do-nothing: +step s1-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete: +step s2-delete: DELETE FROM target_table; -step s1-commit: +step s1-commit: COMMIT; step s2-delete: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-do-nothing: +step s1-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-update: +step s2-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update: <... 
completed> -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-update: +step s2-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update: <... completed> -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-do-nothing s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-do-nothing: +step s2-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-do-nothing: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin-replication-factor-2 s1-insert-into-select-conflict-update-replication-factor-2 s2-begin-replication-factor-2 s2-insert-into-select-conflict-update-replication-factor-2 s1-commit s2-commit create_distributed_table - -step s1-begin-replication-factor-2: + +step s1-begin-replication-factor-2: SET citus.shard_replication_factor to 2; BEGIN; -step s1-insert-into-select-conflict-update-replication-factor-2: +step s1-insert-into-select-conflict-update-replication-factor-2: INSERT INTO target_table_2 - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 col_3 +col_1 col_2 col_3 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin-replication-factor-2: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin-replication-factor-2: SET citus.shard_replication_factor to 2; BEGIN; -step s2-insert-into-select-conflict-update-replication-factor-2: +step s2-insert-into-select-conflict-update-replication-factor-2: INSERT INTO target_table_2 - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update-replication-factor-2: <... completed> -col_1 col_2 col_3 +col_1 col_2 col_3 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_insert_select_vs_all.out b/src/test/regress/expected/isolation_insert_select_vs_all.out index 9800bd726..7987f87c5 100644 --- a/src/test/regress/expected/isolation_insert_select_vs_all.out +++ b/src/test/regress/expected/isolation_insert_select_vs_all.out @@ -3,8 +3,8 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -15,15 +15,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -34,15 +34,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-update-on-inserted: UPDATE 
insert_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -53,15 +53,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -73,15 +73,15 @@ step s2-truncate-on-inserted: TRUNCATE insert_of_insert_select_hash; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -93,15 +93,15 @@ step s2-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-commit: COMMIT; step s2-drop-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -113,9 +113,9 @@ step s2-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_ step s1-commit: COMMIT; step s2-ddl-create-index-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -125,8 +125,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-insert-select s2-ddl-drop-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -139,9 +139,9 @@ step s2-ddl-drop-index-on-inserted: DROP INDEX insert_of_insert_select_hash_inde step s1-commit: COMMIT; step s2-ddl-drop-index-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -151,8 +151,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -164,9 +164,9 @@ step s2-ddl-create-index-concurrently-on-inserted: CREATE INDEX CONCURRENTLY ins step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -176,8 +176,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -189,9 +189,9 @@ step s2-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD step s1-commit: COMMIT; step s2-ddl-add-column-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -201,8 +201,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-insert-select s2-ddl-drop-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -215,9 +215,9 @@ step s2-ddl-drop-column-on-inserted: ALTER TABLE insert_of_insert_select_hash DR step s1-commit: COMMIT; step s2-ddl-drop-column-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -227,8 +227,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -240,9 +240,9 @@ step s2-ddl-rename-column-on-inserted: ALTER TABLE insert_of_insert_select_hash step s1-commit: COMMIT; step s2-ddl-rename-column-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -257,8 +257,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -269,18 +269,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -291,15 +291,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-master-modify-multiple-shards-on-inserted: DELETE FROM insert_of_insert_select_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -312,19 +312,19 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards-on-inserted: <... 
completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-insert-select s2-distribute-table-on-inserted s1-commit s1-select-count create_distributed_table - + step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -336,18 +336,18 @@ ERROR: cannot INSERT rows from a distributed query into a local table step s2-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -358,15 +358,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-update-on-selected: UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -377,15 +377,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -397,15 
+397,15 @@ step s2-truncate-on-selected: TRUNCATE select_of_insert_select_hash; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -422,8 +422,8 @@ ERROR: relation "select_of_insert_select_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -434,9 +434,9 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -446,8 +446,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-insert-select s2-ddl-drop-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -460,9 +460,9 @@ step s2-ddl-drop-index-on-selected: DROP INDEX select_of_insert_select_hash_inde step s1-commit: COMMIT; step s2-ddl-drop-index-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -472,8 +472,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -485,9 +485,9 @@ step s2-ddl-create-index-concurrently-on-selected: CREATE INDEX CONCURRENTLY sel step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-selected: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -497,8 +497,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -510,9 +510,9 @@ step s2-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD step s1-commit: COMMIT; step s2-ddl-add-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -522,8 +522,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-insert-select s2-ddl-drop-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -536,9 +536,9 @@ ERROR: INSERT has more expressions than target columns step s2-ddl-drop-column-on-selected: ALTER TABLE select_of_insert_select_hash DROP new_column; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -548,8 +548,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -561,9 +561,9 @@ step s2-ddl-rename-column-on-selected: ALTER TABLE select_of_insert_select_hash step s1-commit: COMMIT; step s2-ddl-rename-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -573,8 +573,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -585,18 +585,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -607,15 +607,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-master-modify-multiple-shards-on-selected: DELETE FROM select_of_insert_select_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -628,19 +628,19 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards-on-selected: <... 
completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-insert-select s2-distribute-table-on-selected s1-commit s1-select-count create_distributed_table - + step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -651,18 +651,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -673,15 +673,15 @@ step s1-update-on-inserted: UPDATE insert_of_insert_select_hash SET data = 'l' W step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -692,15 +692,15 @@ step s1-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-truncate-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM 
PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -712,15 +712,15 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-drop-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -733,15 +733,15 @@ step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: relation "insert_of_insert_select_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -753,9 +753,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -765,8 +765,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-ddl-drop-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -779,9 +779,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -791,8 +791,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -804,9 +804,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -816,8 +816,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-ddl-drop-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -830,9 +830,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -842,8 +842,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -855,9 +855,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -867,8 +867,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -878,19 +878,19 @@ step s1-begin: BEGIN; step s1-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -901,15 +901,15 @@ step s1-master-modify-multiple-shards-on-inserted: DELETE FROM insert_of_insert_ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -919,23 +919,23 @@ step s1-begin: BEGIN; step s1-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); master_drop_all_shards -4 +4 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> error in steps s1-commit s2-insert-select: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-distribute-table-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -945,20 +945,20 @@ step s1-begin: BEGIN; step s1-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); create_distributed_table - + step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -969,15 +969,15 @@ step s1-update-on-selected: UPDATE select_of_insert_select_hash SET data = 'l' W step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -988,15 +988,15 @@ step s1-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id = step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' 
WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1008,15 +1008,15 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-drop-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1034,8 +1034,8 @@ ERROR: relation "select_of_insert_select_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1046,9 +1046,9 @@ step s1-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -1058,8 +1058,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-ddl-drop-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1072,9 +1072,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -1084,8 +1084,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1098,9 +1098,9 @@ step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: INSERT has more expressions than target columns step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1110,8 +1110,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-ddl-drop-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1124,9 +1124,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1136,8 +1136,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1149,9 +1149,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1161,8 +1161,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1172,19 +1172,19 @@ step s1-begin: BEGIN; step s1-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1195,15 +1195,15 @@ step s1-master-modify-multiple-shards-on-selected: DELETE FROM select_of_insert_ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1213,22 +1213,22 @@ step s1-begin: BEGIN; step s1-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); master_drop_all_shards -4 +4 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-distribute-table-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -1238,10 +1238,10 @@ step s1-begin: BEGIN; step s1-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id'); create_distributed_table - + step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 diff --git a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out index 97e9b1a5f..178c5a4dd 100644 --- a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out @@ -1,963 +1,963 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-colocated-insert-select: + +step s2-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step 
s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -15 +15 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-colocated-insert-select: + +step s2-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -15 +15 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step 
s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-via-coordinator: + +step s2-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -15 +15 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-via-coordinator: + +step s2-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -15 +15 
restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (5, 50), (6, 60)') run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -12 +12 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM 
dist_table') run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=55 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); 
run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=55 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step 
s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -13 +13 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -13 +13 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-begin: + +step s2-begin: BEGIN; -step s2-coordinator-drop: +step s2-coordinator-drop: DROP TABLE dist_table; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step 
s2-coordinator-drop: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-stop-connection: +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; ERROR: relation "dist_table" does not exist restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-begin: + +step s2-begin: BEGIN; -step s2-coordinator-drop: +step s2-coordinator-drop: DROP TABLE dist_table; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-coordinator-drop: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-stop-connection: +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; ERROR: relation "dist_table" does not exist restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-colocated-insert-select: + +step s1-colocated-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); 
run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select-via-coordinator: + +step s1-insert-select-via-coordinator: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -10 +10 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_insert_vs_all.out b/src/test/regress/expected/isolation_insert_vs_all.out index b230818a3..197130b47 100644 --- a/src/test/regress/expected/isolation_insert_vs_all.out +++ b/src/test/regress/expected/isolation_insert_vs_all.out @@ -3,105 +3,105 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-insert s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && 
echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -7 +7 starting permutation: s1-initialize s1-begin s1-insert s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-insert s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-delete: DELETE FROM insert_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-insert s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c 
&& echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -109,14 +109,14 @@ step s2-truncate: TRUNCATE insert_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -129,7 +129,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -137,9 +137,9 @@ step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -149,7 +149,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -158,9 +158,9 @@ step s2-ddl-drop-index: DROP INDEX insert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -170,7 +170,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -178,9 +178,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -190,7 +190,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -198,9 +198,9 @@ step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -210,7 +210,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -219,9 +219,9 @@ step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -231,7 +231,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); @@ -239,9 +239,9 @@ step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -251,38 +251,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-insert s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; 
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-master-modify-multiple-shards: DELETE FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -1 +1 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -293,58 +293,58 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count -11 +step s1-select-count: SELECT COUNT(*) FROM insert_hash; +count + +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-update s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-delete s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM insert_hash WHERE id = 4; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-truncate s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE insert_hash; @@ -352,14 +352,14 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -1 +1 starting permutation: s1-initialize s1-begin s1-drop s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; @@ -373,7 +373,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); @@ -381,9 +381,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -393,7 +393,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -402,9 +402,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -414,7 +414,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; @@ -422,9 +422,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -434,7 +434,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -443,9 +443,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -455,7 +455,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; @@ -463,9 +463,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -475,38 +475,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -1 +1 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -514,61 +514,61 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table - + step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-delete: DELETE FROM insert_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -7 +7 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -576,14 +576,14 @@ step s2-truncate: TRUNCATE insert_hash; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -596,7 +596,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -604,9 +604,9 @@ step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -616,7 +616,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert-multi-row s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -625,9 +625,9 @@ step s2-ddl-drop-index: DROP INDEX insert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -637,7 +637,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -645,9 +645,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -657,7 +657,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -665,9 +665,9 @@ step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -677,7 +677,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert-multi-row s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -686,9 +686,9 @@ step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -698,7 +698,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -706,9 +706,9 @@ step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -718,38 +718,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step 
s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-master-modify-multiple-shards: DELETE FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert-multi-row s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -760,58 +760,58 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count -13 +step s1-select-count: SELECT COUNT(*) FROM insert_hash; +count + +13 starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 starting permutation: s1-initialize s1-begin s1-update s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-delete s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM insert_hash WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -7 +7 starting permutation: s1-initialize s1-begin s1-truncate s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE insert_hash; @@ -819,14 +819,14 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-initialize s1-begin s1-drop s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; @@ -840,7 +840,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); @@ -848,9 +848,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -860,7 +860,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -869,9 +869,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -881,7 +881,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; @@ -889,9 +889,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -901,7 +901,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -910,9 +910,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -922,7 +922,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; @@ -930,9 +930,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -942,38 +942,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -981,11 +981,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table - + step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 diff --git a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out index 5e041ad30..5a70f0bf3 100644 --- a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out @@ -1,783 +1,783 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -7 -step s1-stop-connection: +7 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - 
-step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -9 -step s1-stop-connection: +9 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-multi-row: + +step s2-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -9 -step s1-stop-connection: +9 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step 
s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-multi-row: + +step s2-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -11 -step s1-stop-connection: +11 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - 
-step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select: + +step s2-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table SELECT * FROM insert_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -11 -step s1-stop-connection: +11 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step 
s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 65 WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-multi-row: + +step s2-update-multi-row: SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 67 WHERE id IN (6, 7)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -8 -step s1-stop-connection: +8 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step 
s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY insert_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -8 -step s1-stop-connection: +8 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE insert_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -0 -step s1-stop-connection: +0 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_insert_vs_vacuum.out b/src/test/regress/expected/isolation_insert_vs_vacuum.out index eb6c481b1..d9fda9453 100644 --- a/src/test/regress/expected/isolation_insert_vs_vacuum.out +++ b/src/test/regress/expected/isolation_insert_vs_vacuum.out @@ -3,34 +3,34 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-vacuum-analyze s1-commit create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); -step s2-vacuum-analyze: +step s2-vacuum-analyze: VACUUM ANALYZE test_insert_vacuum; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-insert s2-vacuum-full s1-commit 
create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); -step s2-vacuum-full: +step s2-vacuum-full: VACUUM FULL test_insert_vacuum; -step s1-commit: +step s1-commit: COMMIT; step s2-vacuum-full: <... completed> diff --git a/src/test/regress/expected/isolation_master_append_table.out b/src/test/regress/expected/isolation_master_append_table.out index 6c88f7a56..539384e2d 100644 --- a/src/test/regress/expected/isolation_master_append_table.out +++ b/src/test/regress/expected/isolation_master_append_table.out @@ -1,13 +1,13 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-master_append_table_to_shard s2-master_append_table_to_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_append_table_to_shard: +step s1-master_append_table_to_shard: SELECT master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) FROM @@ -17,8 +17,8 @@ step s1-master_append_table_to_shard: master_append_table_to_shard -0.0426667 -step s2-master_append_table_to_shard: +0.0426667 +step s2-master_append_table_to_shard: SELECT master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) FROM @@ -26,13 +26,13 @@ step s2-master_append_table_to_shard: WHERE 'table_to_append'::regclass::oid = logicalrelid; -step s1-commit: +step s1-commit: COMMIT; step s2-master_append_table_to_shard: <... completed> master_append_table_to_shard -0.064 -step s2-commit: +0.064 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_master_apply_delete.out b/src/test/regress/expected/isolation_master_apply_delete.out index fe596eef3..b4ea51901 100644 --- a/src/test/regress/expected/isolation_master_apply_delete.out +++ b/src/test/regress/expected/isolation_master_apply_delete.out @@ -1,109 +1,109 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-master_apply_delete_command_all_shard s2-master_apply_delete_command_all_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_all_shard: +step s1-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); master_apply_delete_command -1 -step s2-master_apply_delete_command_all_shard: +1 +step s2-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_all_shard: <... completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_all_shard s2-master_apply_delete_command_row s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_all_shard: +step s1-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); master_apply_delete_command -1 -step s2-master_apply_delete_command_row: +1 +step s2-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_row: <... 
completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_row s2-master_apply_delete_command_all_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_row: +step s1-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); master_apply_delete_command -0 -step s2-master_apply_delete_command_all_shard: +0 +step s2-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_all_shard: <... completed> master_apply_delete_command -1 -step s2-commit: +1 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_row s2-master_apply_delete_command_row s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_row: +step s1-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); master_apply_delete_command -0 -step s2-master_apply_delete_command_row: +0 +step s2-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_row: <... completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_master_update_node.out b/src/test/regress/expected/isolation_master_update_node.out index 09d9bf781..cc746278e 100644 --- a/src/test/regress/expected/isolation_master_update_node.out +++ b/src/test/regress/expected/isolation_master_update_node.out @@ -3,11 +3,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: -- update a specific node by address SELECT master_update_node(nodeid, 'localhost', nodeport + 10) FROM pg_dist_node @@ -18,21 +18,21 @@ step s1-abort: ABORT; step s2-update-node-1: <... completed> master_update_node - + step s2-abort: ABORT; master_remove_node - - + + starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1-force: +step s2-update-node-1-force: -- update a specific node by address (force) SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) FROM pg_dist_node @@ -42,7 +42,7 @@ step s2-update-node-1-force: step s2-update-node-1-force: <... 
completed> master_update_node - + step s2-abort: ABORT; step s1-abort: ABORT; WARNING: this step had a leftover error message @@ -53,5 +53,5 @@ server closed the connection unexpectedly master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_master_update_node_0.out b/src/test/regress/expected/isolation_master_update_node_0.out index eb450d715..8dbc71029 100644 --- a/src/test/regress/expected/isolation_master_update_node_0.out +++ b/src/test/regress/expected/isolation_master_update_node_0.out @@ -3,11 +3,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: -- update a specific node by address SELECT master_update_node(nodeid, 'localhost', nodeport + 10) FROM pg_dist_node @@ -18,21 +18,21 @@ step s1-abort: ABORT; step s2-update-node-1: <... completed> master_update_node - + step s2-abort: ABORT; master_remove_node - - + + starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1-force: +step s2-update-node-1-force: -- update a specific node by address (force) SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) FROM pg_dist_node @@ -42,7 +42,7 @@ step s2-update-node-1-force: step s2-update-node-1-force: <... completed> master_update_node - + step s2-abort: ABORT; step s1-abort: ABORT; WARNING: this step had a leftover error message @@ -51,5 +51,5 @@ SSL connection has been closed unexpectedly master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out index 67a1e5dcb..d61825ebc 100644 --- a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out +++ b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out @@ -1,127 +1,127 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-insert_to_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-insert_to_events_test_table: +step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); -step s2-commit: +step s2-commit: COMMIT; step s1-insert_to_events_test_table: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-update_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-update_events_test_table: +step s1-update_events_test_table: UPDATE users_test_table SET value_1 = 3; -step s2-commit: +step s2-commit: COMMIT; step s1-update_events_test_table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-delete_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-delete_events_test_table: +step s1-delete_events_test_table: DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; -step s2-commit: +step s2-commit: COMMIT; step s1-delete_events_test_table: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s1-insert_to_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-insert_to_events_test_table: +step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update_events_test_table: +step s1-update_events_test_table: UPDATE users_test_table SET value_1 = 3; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-delete_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-delete_events_test_table: +step s1-delete_events_test_table: DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out b/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out index 47411070d..8fcaf2ae5 100644 --- a/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out +++ b/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out @@ -1,458 +1,458 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-select s1-commit s2-select s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 23 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 -step s1-commit: +1 5 6 7 +2 12 7 18 +3 23 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 +step s1-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 3 6 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 3 10 17 -6 3 11 25 -7 3 12 18 -step s2-commit: +1 3 6 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 3 10 17 +6 3 11 25 +7 3 12 18 +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-update_all_value_1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_all_value_1: +step s2-update_all_value_1: UPDATE users_test_table SET value_1 = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_all_value_1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_even_concurrently s2-begin s2-update_odd_concurrently s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_even_concurrently: +step s1-update_even_concurrently: SET citus.enable_deadlock_prevention TO off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 0; SET citus.enable_deadlock_prevention TO on; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_odd_concurrently: +step s2-update_odd_concurrently: SET citus.enable_deadlock_prevention = off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 1; SET citus.enable_deadlock_prevention TO on; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_even_concurrently s2-begin s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_even_concurrently: +step s1-update_even_concurrently: SET citus.enable_deadlock_prevention TO off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 0; SET citus.enable_deadlock_prevention TO on; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_4_or_6_to_4: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_value_1_of_1_or_3_to_5 s2-begin s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 5 8 25 -4 4 9 23 -5 35 10 17 -6 4 11 25 -7 27 12 18 +1 5 6 7 +2 12 7 18 +3 5 8 25 +4 4 9 23 +5 35 10 17 +6 4 11 25 +7 27 12 18 starting permutation: s1-begin s1-update_value_1_of_1_or_3_to_5 s2-begin s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 8 6 7 -2 12 7 18 -3 8 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 +1 8 6 7 +2 12 7 18 +3 8 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-insert-to-table s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-to-table: +step s2-insert-to-table: INSERT INTO users_test_table VALUES (1,2,3,4); -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 2 3 4 -1 3 6 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 3 10 17 -6 3 11 25 -7 3 12 18 +1 2 3 4 +1 3 6 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 3 10 17 +6 3 11 25 +7 3 12 18 starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-insert-into-select s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-into-select: +step s2-insert-into-select: INSERT INTO users_test_table SELECT * FROM events_test_table; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 3 6 7 -1 5 7 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 22 9 25 -5 3 10 17 -7 41 10 23 -6 3 11 25 -7 3 12 18 -1 20 12 25 -3 26 13 18 -5 17 14 4 -3 11 78 18 +1 3 6 7 +1 5 7 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 22 9 25 +5 3 10 17 +7 41 10 23 +6 3 11 25 +7 3 12 18 +1 20 12 25 +3 26 13 18 +5 17 14 4 +3 11 78 18 starting permutation: s1-begin s2-begin s1-update_value_1_of_1_or_3_to_5 s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-update_value_1_of_1_or_3_to_8 s1-update_value_1_of_2_or_4_to_5 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-update_value_1_of_2_or_4_to_5: +step s1-update_value_1_of_2_or_4_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 2 or user_id = 4; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_all_value_1 s2-begin s2-change_connection_mode_to_sequential s2-update_all_value_1 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_all_value_1: +step s2-update_all_value_1: UPDATE users_test_table SET value_1 = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_all_value_1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 6 6 7 -2 6 7 18 -3 6 8 25 -4 6 9 23 -5 6 10 17 -6 6 11 25 -7 6 12 18 +1 6 6 7 +2 6 7 18 +3 6 8 25 +4 6 9 23 +5 6 10 17 +6 6 11 25 +7 6 12 18 starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-begin s2-change_connection_mode_to_sequential s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 8 6 7 -2 12 7 18 -3 8 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 +1 8 6 7 +2 12 7 18 +3 8 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-begin s2-change_connection_mode_to_sequential s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 5 8 25 -4 4 9 23 -5 35 10 17 -6 4 11 25 -7 27 12 18 +1 5 6 7 +2 12 7 18 +3 5 8 25 +4 4 9 23 +5 35 10 17 +6 4 11 25 +7 27 12 18 starting permutation: s1-begin s2-begin s1-change_connection_mode_to_sequential s2-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 
'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-change_connection_mode_to_sequential s2-change_connection_mode_to_sequential s2-update_value_1_of_1_or_3_to_8 s1-update_value_1_of_2_or_4_to_5 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-update_value_1_of_2_or_4_to_5: +step s1-update_value_1_of_2_or_4_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 2 or user_id = 4; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_multiuser_locking.out b/src/test/regress/expected/isolation_multiuser_locking.out index 6dea9cc91..1c8860726 100644 --- a/src/test/regress/expected/isolation_multiuser_locking.out +++ b/src/test/regress/expected/isolation_multiuser_locking.out @@ -1,284 +1,284 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-reindex: +step s2-reindex: REINDEX TABLE test_table; ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-reindex: +step s2-reindex: REINDEX TABLE test_table; ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; 
-step s1-reindex: +step s1-reindex: REINDEX TABLE test_table; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-index: +step s2-index: CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; -step s2-drop-index: +step s2-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-index: +step s1-index: CREATE INDEX test_index ON test_table(column1); -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-index: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-drop-index: +step s1-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-index: +step s1-index: CREATE INDEX test_index ON test_table(column1); -step s2-index: +step s2-index: CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s1-drop-index: +step s1-drop-index: DROP INDEX IF EXISTS test_index; -step s2-drop-index: +step s2-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-truncate: +step s2-truncate: TRUNCATE test_table; ERROR: permission denied for table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step 
s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-truncate: +step s1-truncate: TRUNCATE test_table; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-truncate: +step s1-truncate: TRUNCATE test_table; -step s2-truncate: +step s2-truncate: TRUNCATE test_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out index 7b0a028a9..3b578a581 100644 --- a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out +++ b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM partitioned_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy 
s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM partitioned_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 
4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE partitioned_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_colum step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -224,38 +224,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size -0 +0 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: DELETE FROM partitioned_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -264,16 +264,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -284,134 +284,134 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; +count + +15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM partitioned_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 
6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM partitioned_copy WHERE id = 1; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE partitioned_copy; @@ -419,14 +419,14 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE partitioned_copy; @@ -440,7 +440,7 @@ ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; @@ -449,9 +449,9 @@ step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -461,7 +461,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -470,9 +470,9 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -482,7 +482,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column; @@ -490,9 +490,9 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -502,57 +502,57 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size -0 +0 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); master_drop_all_shards -4 +4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -560,11 +560,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id'); create_distributed_table - + step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_progress_monitoring.out b/src/test/regress/expected/isolation_progress_monitoring.out index e2b699394..679e42c6b 100644 --- a/src/test/regress/expected/isolation_progress_monitoring.out +++ b/src/test/regress/expected/isolation_progress_monitoring.out @@ -1,7 +1,7 @@ Parsed test spec with 5 sessions starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3 -step take-locks: +step take-locks: -- Locks for steps of sample operation in s1 SELECT pg_advisory_lock(10); SELECT pg_advisory_lock(11); @@ -17,55 +17,55 @@ step take-locks: pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - -step s1-start-operation: + +step s1-start-operation: SELECT sample_operation(1337, 10, -1); -step s2-start-operation: +step s2-start-operation: SELECT sample_operation(1337, 20, 2); -step s3-start-operation: +step s3-start-operation: SELECT sample_operation(3778, 30, 9); -step show-progress: +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,0) -(1,0) -(0,0) -(1,0) -show_progress +(0,0) +(1,0) +(0,0) +(1,0) +show_progress -(0,0) -(1,0) -step release-locks-1: +(0,0) +(1,0) +step release-locks-1: -- Release the locks of first steps of sample operations SELECT pg_advisory_unlock(10); SELECT pg_advisory_unlock(20); @@ -73,28 +73,28 @@ step release-locks-1: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t -step show-progress: +t +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,-1) -(1,0) -(0,2) -(1,0) -show_progress +(0,-1) +(1,0) +(0,2) +(1,0) +show_progress -(0,9) -(1,0) -step release-locks-2: +(0,9) +(1,0) +step release-locks-2: -- Release the locks of second steps of sample operations SELECT pg_advisory_unlock(11); SELECT pg_advisory_unlock(21); @@ -102,28 +102,28 @@ step release-locks-2: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t -step show-progress: +t +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,-1) -(1,-1) -(0,2) -(1,2) -show_progress +(0,-1) +(1,-1) +(0,2) +(1,2) +show_progress -(0,9) -(1,9) -step release-locks-3: +(0,9) +(1,9) +step release-locks-3: -- Release the locks of final steps of sample operations SELECT pg_advisory_unlock(12); SELECT pg_advisory_unlock(22); @@ -131,22 +131,22 @@ step release-locks-3: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t +t step s1-start-operation: <... completed> sample_operation - + step s2-start-operation: <... completed> sample_operation - + step s3-start-operation: <... 
completed> sample_operation - + diff --git a/src/test/regress/expected/isolation_range_copy_vs_all.out b/src/test/regress/expected/isolation_range_copy_vs_all.out index 1dacae535..5e0e7e474 100644 --- a/src/test/regress/expected/isolation_range_copy_vs_all.out +++ b/src/test/regress/expected/isolation_range_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM range_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step 
s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO range_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO range_copy SELECT * FROM range_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE range_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM range_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE range_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX range_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY range_copy_inde step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers @@ -224,7 +224,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -244,7 +244,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE range_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -265,7 +265,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -285,38 +285,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('range_copy'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: DELETE FROM range_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -325,16 +325,16 @@ step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command -1 +1 step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -343,16 +343,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -2 +2 step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; @@ -362,134 +362,134 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM range_copy; -count -0 +step s1-select-count: SELECT COUNT(*) FROM range_copy; +count + +0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM range_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0); step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM 
range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM range_copy WHERE id = 1; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE range_copy; @@ -497,14 +497,14 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE range_copy; @@ -518,7 +518,7 @@ ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); @@ -526,9 +526,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers @@ -538,7 +538,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; @@ -547,9 +547,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers @@ -559,7 +559,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; @@ -568,9 +568,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -580,7 +580,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -589,9 +589,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -601,7 +601,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column; @@ -609,9 +609,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -621,86 +621,86 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('range_copy'); citus_total_relation_size -32768 +32768 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM range_copy; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); master_apply_delete_command -1 +1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); master_drop_all_shards -1 +1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -5 +5 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); create_distributed_table - + step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -0 +0 diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out index b59241288..507db3ccd 100644 --- a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out +++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out @@ -1,952 +1,952 @@ Parsed test spec with 2 sessions starting permutation: s2-begin s2-update-table-1 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-1: +step s2-update-table-1: UPDATE ref_table_1 SET id = 2 WHERE id = 1; -step s1-begin: +step s1-begin: BEGIN; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 1 -ShareLock 1 -step s1-rollback: +ExclusiveLock 1 +ShareLock 1 +step s1-rollback: ROLLBACK; -step s2-rollback: +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-delete-table-1 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 1 -ShareLock 1 -step s2-rollback: +ExclusiveLock 1 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-update-table-2 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-2: +step s2-update-table-2: UPDATE ref_table_2 SET id = 2 WHERE id = 1; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 2 -ShareLock 1 -step s2-rollback: +ExclusiveLock 2 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-delete-table-2 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-2: +step s2-delete-table-2: DELETE FROM ref_table_2 WHERE id = 1; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 2 
-ShareLock 1 -step s2-rollback: +ExclusiveLock 2 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-update-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-3: +step s2-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE id = 1; -step s1-begin: +step s1-begin: BEGIN; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 3 -ShareLock 1 -step s1-rollback: +ExclusiveLock 3 +ShareLock 1 +step s1-rollback: ROLLBACK; -step s2-rollback: +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-delete-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-3: +step s2-delete-table-3: DELETE FROM ref_table_3 WHERE id = 1; -step s1-begin: +step s1-begin: BEGIN; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -ExclusiveLock 3 -ShareLock 1 -step s1-rollback: +ExclusiveLock 3 +ShareLock 1 +step s1-rollback: ROLLBACK; -step s2-rollback: +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-insert-table-1 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -RowExclusiveLock1 -ShareLock 1 -step s2-rollback: +RowExclusiveLock1 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-insert-table-2 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-2: +step s2-insert-table-2: INSERT INTO ref_table_2 VALUES (7, 5); -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -RowExclusiveLock2 -ShareLock 1 -step s2-rollback: +RowExclusiveLock2 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s2-begin s2-insert-table-3 s1-view-locks s2-rollback s1-view-locks -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-3: +step s2-insert-table-3: INSERT INTO ref_table_3 VALUES (7, 5); -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count -RowExclusiveLock3 -ShareLock 1 -step s2-rollback: +RowExclusiveLock3 +ShareLock 1 +step s2-rollback: ROLLBACK; -step s1-view-locks: +step s1-view-locks: SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode; -mode count +mode count starting permutation: s1-begin 
s2-begin s2-update-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-1: +step s2-update-table-1: UPDATE ref_table_1 SET id = 2 WHERE id = 1; -step s1-delete-table-2: +step s1-delete-table-2: DELETE FROM ref_table_2 WHERE value = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-delete-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-update-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-1: +step s2-update-table-1: UPDATE ref_table_1 SET id = 2 WHERE id = 1; -step s1-insert-table-2: +step s1-insert-table-2: INSERT INTO ref_table_2 VALUES (7, 2); -step s2-commit: +step s2-commit: COMMIT; step s1-insert-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -1 2 -3 3 -5 5 -7 2 +1 2 +3 3 +5 5 +7 2 starting permutation: s1-begin s2-begin s2-update-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-1: +step s2-update-table-1: UPDATE ref_table_1 SET id = 2 WHERE id = 1; -step s1-update-table-2: +step s1-update-table-2: UPDATE ref_table_2 SET id = 0 WHERE value = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -0 2 -3 3 -5 5 +0 2 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-delete-table-2: +step s1-delete-table-2: DELETE FROM ref_table_2 WHERE value = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-delete-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-insert-table-2: +step s1-insert-table-2: INSERT INTO ref_table_2 VALUES (7, 2); -step s2-commit: +step s2-commit: COMMIT; step s1-insert-table-2: <... 
completed> -error in steps s2-commit s1-insert-table-2: ERROR: insert or update on table "ref_table_2_102048" violates foreign key constraint "ref_table_2_value_fkey_102048" -step s1-commit: +error in steps s2-commit s1-insert-table-2: ERROR: insert or update on table "ref_table_2_xxxxxxx" violates foreign key constraint "ref_table_2_value_fkey_xxxxxxx" +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-update-table-2: +step s1-update-table-2: UPDATE ref_table_2 SET id = 0 WHERE value = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-3 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-delete-table-3: +step s1-delete-table-3: DELETE FROM ref_table_3 WHERE value = 1 RETURNING id; -step s2-commit: +step s2-commit: COMMIT; step s1-delete-table-3: <... completed> -id +id -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-3 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-insert-table-3: +step s1-insert-table-3: INSERT INTO ref_table_3 VALUES (7, 1); -step s2-commit: +step s2-commit: COMMIT; step s1-insert-table-3: <... completed> -error in steps s2-commit s1-insert-table-3: ERROR: insert or update on table "ref_table_3_102058" violates foreign key constraint "ref_table_3_value_fkey_102058" -step s1-commit: +error in steps s2-commit s1-insert-table-3: ERROR: insert or update on table "ref_table_3_xxxxxxx" violates foreign key constraint "ref_table_3_value_fkey_xxxxxxx" +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-1: +step s2-delete-table-1: DELETE FROM ref_table_1 WHERE id = 1; -step s1-update-table-3: +step s1-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-3: <... 
completed> -id +id -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -3 3 -5 5 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-update-table-3: +step s1-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-3: <... completed> -id +id -2 -step s1-commit: +2 +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -2 1 -3 3 -5 5 +2 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-table-3: +step s1-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id; -id +id -2 -step s2-insert-table-1: +2 +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-table-1: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -2 1 -3 3 -5 5 +2 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-update-table-2: +step s1-update-table-2: UPDATE ref_table_2 SET id = 0 WHERE value = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-2: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 +1 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s1-update-table-2 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-table-2: +step s1-update-table-2: UPDATE ref_table_2 SET id = 0 WHERE value = 2; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-table-1: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 +1 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-insert-table-2 s1-update-table-3 s2-commit s1-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-2: +step s2-insert-table-2: INSERT INTO ref_table_2 VALUES (7, 5); -step s1-update-table-3: +step s1-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id; -step s2-commit: +step s2-commit: COMMIT; step s1-update-table-3: <... 
completed> -id +id -2 -step s1-commit: +2 +step s1-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -2 1 -3 3 -5 5 +2 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-2 s1-commit s2-commit s1-select-table-3 -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-table-3: +step s1-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id; -id +id -2 -step s2-insert-table-2: +2 +step s2-insert-table-2: INSERT INTO ref_table_2 VALUES (7, 5); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-table-2: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -2 1 -3 3 -5 5 +2 1 +3 3 +5 5 starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-1 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-select-table-1: +step s1-select-table-1: SELECT * FROM ref_table_1 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-2 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-3 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-table-1: +step s2-insert-table-1: INSERT INTO ref_table_1 VALUES (7, 7); -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-1 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-2: +step s2-delete-table-2: DELETE FROM ref_table_2 WHERE id = 1; -step s1-select-table-1: +step s1-select-table-1: SELECT * FROM ref_table_1 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-2 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-2: +step s2-delete-table-2: DELETE FROM ref_table_2 WHERE id = 1; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-3 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete-table-2: 
+step s2-delete-table-2: DELETE FROM ref_table_2 WHERE id = 1; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-1 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-3: +step s2-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE id = 1; -step s1-select-table-1: +step s1-select-table-1: SELECT * FROM ref_table_1 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-2 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-3: +step s2-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE id = 1; -step s1-select-table-2: +step s1-select-table-2: SELECT * FROM ref_table_2 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-3 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-table-3: +step s2-update-table-3: UPDATE ref_table_3 SET id = 2 WHERE id = 1; -step s1-select-table-3: +step s1-select-table-3: SELECT * FROM ref_table_3 ORDER BY id, value; -id value +id value -1 1 -3 3 -5 5 -step s2-commit: +1 1 +3 3 +5 5 +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out index b36eaa365..1dab78f92 100644 --- a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out +++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out @@ -1,658 +1,658 @@ Parsed test spec with 2 sessions starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-table-1: + +step s2-update-table-1: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_1 SET id = 2 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], 
false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete-table-1: + +step s2-delete-table-1: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_1 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + 
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-table-2: + +step s2-update-table-2: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_2 SET id = 2 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete-table-2: + +step s2-delete-table-2: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_2 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: 
SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-table-3: + +step s2-update-table-3: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_3 SET id = 2 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: 
+localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete-table-3: + +step s2-delete-table-3: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_3 WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-table-1: + +step s2-insert-table-1: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_1 
VALUES (7, 7)'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(RowExclusiveLock,1)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-table-2: + +step s2-insert-table-2: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_2 VALUES (7, 5)'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(RowExclusiveLock,2)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, 
t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-table-3: + +step s2-insert-table-3: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_3 VALUES (7, 5)'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(RowExclusiveLock,3)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out index 5ea02eb65..7855dadea 100644 --- a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out @@ -3,531 +3,531 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection 
s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-update: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 10), (2, 20)'); -step s1-commit-worker: +step s1-commit-worker: SELECT 
run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT 
run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-ref-table: + +step s2-insert-select-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table SELECT * FROM ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert-select-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 1, 10 && echo 2, 20''WITH CSV'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-copy: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter: + +step s2-alter: ALTER TABLE ref_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY ref_table_index ON ref_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out index 9f4ee7360..0b82dab0e 100644 --- a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out @@ -3,299 +3,299 @@ Parsed test spec with 3 sessions starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-add-primary-key: + +step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-upsert: + +step s1-upsert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 3), (2, 3) ON CONFLICT (id) DO UPDATE SET value=3'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT 
run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-ref-table: + +step s2-insert-select-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table SELECT * FROM ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert-select-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-add-primary-key: + +step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-upsert: + +step s1-upsert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 3), (2, 3) ON CONFLICT (id) DO UPDATE SET value=3'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-drop: + +step s2-drop: DROP TABLE ref_table; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-drop: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; ERROR: relation "ref_table" does not exist restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index c6ed82f07..1cba83a40 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -3,97 +3,97 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM reference_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; 
SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -101,14 +101,14 @@ step s2-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -20 +20 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -116,14 +116,14 @@ step s2-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s2-update: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -131,14 +131,14 @@ step s2-delete: DELETE FROM reference_copy WHERE id = 1; step s1-commit: COMMIT; step s2-delete: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -146,14 +146,14 @@ step s2-truncate: TRUNCATE reference_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -166,7 +166,7 @@ ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -174,9 +174,9 @@ step s2-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -186,7 +186,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; @@ -195,9 +195,9 @@ step s2-ddl-drop-index: DROP INDEX reference_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -207,7 +207,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -215,9 +215,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY reference_copy_ step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -227,7 +227,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -235,9 +235,9 @@ step s2-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -247,7 +247,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -256,9 +256,9 @@ step s2-ddl-drop-column: ALTER TABLE reference_copy DROP new_column; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -268,7 +268,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -276,9 +276,9 @@ step s2-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -288,24 +288,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -313,14 +313,14 @@ step s2-master-modify-multiple-shards: DELETE FROM reference_copy; step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_reference_table - + step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -331,92 +331,92 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_reference_table - -step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM reference_copy; +count + +15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM reference_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; @@ -424,14 +424,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; 
step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; @@ -439,14 +439,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM reference_copy WHERE id = 1; @@ -454,14 +454,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE reference_copy; @@ -469,14 +469,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE reference_copy; @@ -490,7 +490,7 @@ ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); @@ -498,9 +498,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -510,7 +510,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; @@ -519,9 +519,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -531,7 +531,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; @@ -540,9 +540,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -552,7 +552,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -561,9 +561,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -573,7 +573,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; @@ -581,9 +581,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -593,24 +593,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size -32768 +32768 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM reference_copy; @@ -618,14 +618,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_reference_table - + step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -633,11 +633,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_reference_table('reference_copy'); create_reference_table - + step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_reference_on_mx.out b/src/test/regress/expected/isolation_reference_on_mx.out index d45f54659..1fed9e597 100644 --- a/src/test/regress/expected/isolation_reference_on_mx.out +++ b/src/test/regress/expected/isolation_reference_on_mx.out @@ -1,616 +1,616 @@ Parsed test spec with 2 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-ref-table: + +step s1-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete-from-ref-table: + +step s1-delete-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY 
ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step 
s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy-to-ref-table: + +step s2-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step 
s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-from-ref-table: + +step s2-select-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM ref_table'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_replace_wait_function.out b/src/test/regress/expected/isolation_replace_wait_function.out index 15c4649b7..c93af44b2 100644 --- a/src/test/regress/expected/isolation_replace_wait_function.out +++ b/src/test/regress/expected/isolation_replace_wait_function.out @@ -3,23 +3,23 @@ Parsed test spec with 2 sessions starting permutation: s1-insert-1 s2-insert s1-commit s2-commit create_distributed_table - -step s1-insert-1: + +step s1-insert-1: BEGIN; INSERT INTO test_locking (a) VALUES (1); -step s2-insert: +step s2-insert: BEGIN; INSERT INTO test_locking (a) VALUES (1); -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... 
completed> error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_1400001" -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index eae88c220..110178365 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -3,17 +3,17 @@ Parsed test spec with 3 sessions starting permutation: s1-begin s2-begin s1-update-dist-table s2-lock-ref-table-placement-on-coordinator s1-lock-ref-table-placement-on-coordinator s2-update-dist-table deadlock-checker-call s1-end s2-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-dist-table: +step s1-update-dist-table: update dist_table set b = 2 where a = 1; -step s2-lock-ref-table-placement-on-coordinator: +step s2-lock-ref-table-placement-on-coordinator: DO $$ DECLARE refshardid int; BEGIN @@ -22,7 +22,7 @@ step s2-lock-ref-table-placement-on-coordinator: END $$; -step s1-lock-ref-table-placement-on-coordinator: +step s1-lock-ref-table-placement-on-coordinator: DO $$ DECLARE refshardid int; BEGIN @@ -31,113 +31,113 @@ step s1-lock-ref-table-placement-on-coordinator: END $$; -step s2-update-dist-table: +step s2-update-dist-table: update dist_table set b = 2 where a = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-lock-ref-table-placement-on-coordinator: <... completed> step s2-update-dist-table: <... 
completed> error in steps deadlock-checker-call s1-lock-ref-table-placement-on-coordinator s2-update-dist-table: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-end: +step s1-end: END; -step s2-end: +step s2-end: END; master_remove_node - + starting permutation: s1-begin s2-begin s1-update-ref-table s2-sleep s2-view-dist s2-view-worker s2-end s1-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-ref-table: +step s1-update-ref-table: update ref_table set a = a + 1; -step s2-sleep: +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname update ref_table set a = a + 1; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression update ref_table set a = a + 1; -localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, - master_query_host_port, state, wait_event_type, wait_event, usename, datname +localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-view-worker: + SELECT query, query_hostname, query_hostport, master_query_host_name, + master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity - WHERE query NOT ILIKE '%pg_prepared_xacts%' AND + WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%dump_local_wait_edges%' ORDER BY query, query_hostport DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-end: +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle 
in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-end: END; -step s1-end: +step s1-end: END; master_remove_node - + starting permutation: s1-begin s2-begin s1-update-ref-table s2-active-transactions s1-end s2-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-ref-table: +step s1-update-ref-table: update ref_table set a = a + 1; -step s2-active-transactions: +step s2-active-transactions: -- Admin should be able to see all transactions SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -6 -step s1-end: +6 +step s1-end: END; -step s2-end: +step s2-end: END; master_remove_node - + diff --git a/src/test/regress/expected/isolation_select_for_update.out b/src/test/regress/expected/isolation_select_for_update.out index c1d655d60..7a5be5ff1 100644 --- a/src/test/regress/expected/isolation_select_for_update.out +++ b/src/test/regress/expected/isolation_select_for_update.out @@ -1,103 +1,103 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-select-from-t1-t2-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-update: +step s1-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-delete-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-share: +step s1-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-delete-t1: +step s2-delete-t1: DELETE FROM test_table_1_rf1 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-delete-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-for-update: +step s1-select-from-t1-rt-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-with-lc-for-update: +step s1-select-from-t1-rt-with-lc-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 @@ -105,31 +105,31 @@ step s1-select-from-t1-rt-with-lc-for-update: FOR UPDATE OF rt1; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-with-lc-for-update: +step s1-select-from-t1-rt-with-lc-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 @@ -137,265 +137,265 @@ step s1-select-from-t1-rt-with-lc-for-update: FOR UPDATE OF rt1; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-select-from-t1-t2-for-share s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-share: +step s1-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-share: +step s2-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s1-commit: +1 2 1 2 +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-for-update: +step s1-select-from-t1-rt-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-update: +step s2-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -step s1-commit: +step s1-commit: COMMIT; step s2-select-from-t1-t2-for-update: <... 
completed> -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-commit: +1 2 1 2 +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-within-cte: +step s1-select-from-t1-within-cte: WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; -val_1 +val_1 -2 -step s2-begin: +2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-update: +step s2-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -step s1-commit: +step s1-commit: COMMIT; step s2-select-from-t1-t2-for-update: <... completed> -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-commit: +1 2 1 2 +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-within-cte: +step s1-select-from-t1-within-cte: WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; -val_1 +val_1 -2 -step s2-begin: +2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-with-subquery: +step s1-select-from-t1-with-subquery: SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-rt-with-subquery s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-rt-with-subquery: +step s1-select-from-rt-with-subquery: SELECT * FROM (SELECT * FROM ref_table FOR UPDATE) foo WHERE id = 1; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-with-view s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-with-view: +step s1-select-from-t1-with-view: SELECT * FROM test_1 WHERE id = 1 FOR UPDATE; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-update-rt-with-cte-select-from-rt s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update-rt-with-cte-select-from-rt: +step s1-update-rt-with-cte-select-from-rt: WITH foo AS (SELECT * FROM ref_table FOR UPDATE) UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index f4d1a3331..b1136066b 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -3,345 +3,345 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-router-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 -step s2-task-tracker-select: +1 b 1 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && 
echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 -step s2-task-tracker-select: +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM 
select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 -step s2-task-tracker-select: +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: 
s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... completed> @@ -351,19 +351,19 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -373,21 +373,21 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -397,19 +397,19 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -419,20 +419,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -442,21 +442,21 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data new_column +id data int_data new_column -1 b 1 0 +1 b 1 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -466,20 +466,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -489,181 +489,181 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-master-modify-multiple-shards: DELETE FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); master_apply_delete_command -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); master_drop_all_shards -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM 
select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... 
completed> -id data int_data +id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; @@ -677,19 +677,19 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -699,7 +699,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; @@ -707,13 +707,13 @@ step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -723,20 +723,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... 
completed> -id data int_data new_column +id data int_data new_column -1 b 1 0 +1 b 1 0 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -746,7 +746,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -754,13 +754,13 @@ step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -770,20 +770,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... 
completed> -id new_column int_data +id new_column int_data -1 b 1 +1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -793,212 +793,212 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); master_apply_delete_command -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); master_drop_all_shards -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - + step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 
'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... completed> @@ -1008,23 +1008,23 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1034,25 +1034,25 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1062,23 +1062,23 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1088,24 +1088,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1115,25 +1115,25 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data new_column +id data int_data new_column -0 a 0 0 -1 b 1 0 -2 c 2 0 -3 d 3 0 -4 e 4 0 +0 a 0 0 +1 b 1 0 +2 c 2 0 +3 d 3 0 +4 e 4 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1143,24 +1143,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1170,173 +1170,173 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-master-modify-multiple-shards: DELETE FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && 
echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... 
completed> -id data int_data +id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; @@ -1350,23 +1350,23 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1376,7 +1376,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; @@ -1384,17 +1384,17 @@ step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1404,24 +1404,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... 
completed> -id data int_data new_column +id data int_data new_column -0 a 0 0 -1 b 1 0 -2 c 2 0 -3 d 3 0 -4 e 4 0 +0 a 0 0 +1 b 1 0 +2 c 2 0 +3 d 3 0 +4 e 4 0 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1431,7 +1431,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -1439,17 +1439,17 @@ step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1459,24 +1459,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... 
completed> -id new_column int_data +id new_column int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1486,206 +1486,206 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - + step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, 
a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... completed> @@ -1695,26 +1695,26 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1724,28 +1724,28 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1755,26 +1755,26 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -1784,27 +1784,27 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1814,28 +1814,28 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data new_column id data int_data new_column +id data int_data new_column id data int_data new_column -0 a 0 0 0 a 0 0 -1 b 1 0 1 b 1 0 -2 c 2 0 2 c 2 0 -3 d 3 0 3 d 3 0 -4 e 4 0 4 e 4 0 +0 a 0 0 0 a 0 0 +1 b 1 0 1 b 1 0 +2 c 2 0 2 c 2 0 +3 d 3 0 3 d 3 0 +4 e 4 0 4 e 4 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1845,27 +1845,27 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -1875,201 +1875,201 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-master-modify-multiple-shards: DELETE FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-task-tracker-select s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id 
= t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-insert-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-update s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-delete s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-truncate s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step 
s2-task-tracker-select: <... completed> -id data int_data id data int_data +id data int_data id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-drop s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; @@ -2082,26 +2082,26 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -2111,28 +2111,28 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -2142,27 +2142,27 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> -id data int_data new_column id data int_data new_column +id data int_data new_column id data int_data new_column -0 a 0 0 0 a 0 0 -1 b 1 0 1 b 1 0 -2 c 2 0 2 c 2 0 -3 d 3 0 3 d 3 0 -4 e 4 0 4 e 4 0 +0 a 0 0 0 a 0 0 +1 b 1 0 1 b 1 0 +2 c 2 0 2 c 2 0 +3 d 3 0 3 d 3 0 +4 e 4 0 4 e 4 0 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -2172,28 +2172,28 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -2203,27 +2203,27 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> -id new_column int_data id new_column int_data +id new_column int_data id new_column int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -2233,73 +2233,73 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 -step s2-task-tracker-select: +32768 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: 
COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table - -step s2-task-tracker-select: + +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out index 933e884f5..5c5ce570f 100644 --- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out @@ -1,426 +1,426 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: 
SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select: + +step s2-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO select_table SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM select_table; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete: + +step s2-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM select_table WHERE id = 1'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); 
stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM select_table; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY select_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM select_table; -count +count -7 +7 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-begin s2-index s1-commit-worker s2-commit s1-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-begin: + +step s2-begin: BEGIN; -step s2-index: +step s2-index: CREATE INDEX select_index ON select_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit: + +step s2-commit: COMMIT; -step s1-stop-connection: +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); 
stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table WHERE id = 6 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY select_table_index ON select_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_shouldhaveshards.out b/src/test/regress/expected/isolation_shouldhaveshards.out index faa82b86a..8de483c5e 100644 --- a/src/test/regress/expected/isolation_shouldhaveshards.out +++ 
b/src/test/regress/expected/isolation_shouldhaveshards.out @@ -1,22 +1,22 @@ Parsed test spec with 2 sessions starting permutation: s1-add-second-node s1-begin s2-begin s2-create-distributed-table s1-noshards s2-commit s1-commit s2-shardcounts -?column? +?column? -1 -step s1-add-second-node: +1 +step s1-add-second-node: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-distributed-table: +step s2-create-distributed-table: CREATE TABLE t1 (a int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; @@ -24,145 +24,145 @@ step s2-create-distributed-table: create_distributed_table - -step s1-noshards: + +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); -step s2-commit: +step s2-commit: COMMIT; step s1-noshards: <... completed> master_set_node_property - -step s1-commit: + +step s1-commit: COMMIT; -step s2-shardcounts: +step s2-shardcounts: SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 't1'::regclass GROUP BY nodeport ORDER BY nodeport; -nodeport count +nodeport count -57637 2 -57638 2 +57637 2 +57638 2 master_remove_node - - + + starting permutation: s1-add-second-node s1-begin s2-begin s1-noshards s2-create-distributed-table s1-commit s2-commit s2-shardcounts -?column? +?column? -1 -step s1-add-second-node: +1 +step s1-add-second-node: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-noshards: +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); master_set_node_property - -step s2-create-distributed-table: + +step s2-create-distributed-table: CREATE TABLE t1 (a int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-distributed-table: <... completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; -step s2-shardcounts: +step s2-shardcounts: SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 't1'::regclass GROUP BY nodeport ORDER BY nodeport; -nodeport count +nodeport count -57638 4 +57638 4 master_remove_node - - + + starting permutation: s1-begin s2-begin s1-noshards s2-update-node s1-commit s2-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-noshards: +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); master_set_node_property - -step s2-update-node: + +step s2-update-node: select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) -step s1-commit: +step s1-commit: COMMIT; step s2-update-node: <... completed> master_update_node - -step s2-commit: + +step s2-commit: COMMIT; master_remove_node - + starting permutation: s1-begin s2-begin s2-update-node s1-noshards s2-commit s1-commit -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-node: +step s2-update-node: select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) master_update_node - -step s1-noshards: + +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); -step s2-commit: +step s2-commit: COMMIT; step s1-noshards: <... completed> -error in steps s2-commit s1-noshards: ERROR: node at "localhost:57637" does not exist -step s1-commit: +error in steps s2-commit s1-noshards: ERROR: node at "localhost:xxxxx" does not exist +step s1-commit: COMMIT; master_remove_node - + diff --git a/src/test/regress/expected/isolation_transaction_recovery.out b/src/test/regress/expected/isolation_transaction_recovery.out index 411a75feb..461bfabd3 100644 --- a/src/test/regress/expected/isolation_transaction_recovery.out +++ b/src/test/regress/expected/isolation_transaction_recovery.out @@ -3,43 +3,43 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-recover s2-insert s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-recover: +step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions -0 -step s2-insert: +0 +step s2-insert: INSERT INTO test_transaction_recovery VALUES (1,2); -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-recover s2-recover s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-recover: +step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions -0 -step s2-recover: +0 +step s2-recover: SELECT recover_prepared_transactions(); -step s1-commit: +step s1-commit: COMMIT; step s2-recover: <... completed> recover_prepared_transactions -0 +0 diff --git a/src/test/regress/expected/isolation_truncate_vs_all.out b/src/test/regress/expected/isolation_truncate_vs_all.out index a555891d8..930c5ef25 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all.out +++ b/src/test/regress/expected/isolation_truncate_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -56,12 +56,12 @@ step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -71,9 +71,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -81,12 +81,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; @@ -97,9 +97,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -107,12 +107,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -120,9 +120,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -130,12 +130,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -145,9 +145,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -155,12 +155,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -171,9 +171,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -181,12 +181,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -196,9 +196,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -206,12 +206,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -221,20 +221,20 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -244,17 +244,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-apply-delete-command s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -264,20 +264,20 @@ step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -287,20 +287,20 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; @@ -311,20 +311,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -334,17 +334,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -358,12 +358,12 @@ step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -373,9 +373,9 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -383,12 +383,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; @@ -399,9 +399,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -409,12 +409,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -424,9 +424,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -434,12 +434,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -450,9 +450,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -460,12 +460,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -475,9 +475,9 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -485,34 +485,34 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('truncate_append'); citus_total_relation_size -32768 +32768 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -522,63 +522,63 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-apply-delete-command s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); master_apply_delete_command -1 +1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); master_drop_all_shards -1 +1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; @@ -586,15 +586,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('truncate_append', 'id', 'append'); create_distributed_table - + step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out index 0f59eaae6..9432fcb58 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out @@ -1,484 +1,484 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-truncate: + +step s1-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM truncate_table WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select: + +step s1-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM truncate_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM truncate_table WHERE id IN (5, 6, 7)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy: + +step s1-copy: SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-begin s1-alter s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit s2-commit-worker s2-stop-connection s3-select-count -step s1-begin: +step s1-begin: BEGIN; -step s1-alter: +step s1-alter: ALTER TABLE truncate_table DROP value; -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM truncate_table WHERE id=5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out index 8e57fe7a6..e52a21b8d 100644 --- a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out @@ -3,287 +3,287 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete: + +step s2-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM dist_table WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT 
start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM dist_table WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -3 +3 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-alter-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter-table: + +step s2-alter-table: ALTER TABLE dist_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter-table: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 4cb171384..7b22761dc 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -1,135 +1,135 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes -nodeid nodename nodeport +nodeid nodename nodeport -22 localhost 57637 -23 localhost 57638 -step s1-begin: +22 localhost 57637 +23 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? 
-1 -step s2-update-node-2: +1 +step s2-update-node-2: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57638), 'localhost', 58638); -step s1-commit: +step s1-commit: COMMIT; step s2-update-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodeid, nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodeid nodename nodeport isactive +nodeid nodename nodeport isactive -22 localhost 58637 t -23 localhost 58638 t -nodeid nodename nodeport +22 localhost 58637 t +23 localhost 58638 t +nodeid nodename nodeport starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes -nodeid nodename nodeport +nodeid nodename nodeport -24 localhost 57637 -25 localhost 57638 -step s1-begin: +24 localhost 57637 +25 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -step s1-commit: +step s1-commit: COMMIT; step s2-update-node-1: <... completed> -?column? +?column? -1 -step s2-abort: +1 +step s2-abort: ABORT; -step s1-show-nodes: +step s1-show-nodes: SELECT nodeid, nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodeid nodename nodeport isactive +nodeid nodename nodeport isactive -25 localhost 57638 t -24 localhost 58637 t -nodeid nodename nodeport +25 localhost 57638 t +24 localhost 58637 t +nodeid nodename nodeport starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata -nodeid nodename nodeport +nodeid nodename nodeport -26 localhost 57637 -27 localhost 57638 -step s1-begin: +26 localhost 57637 +27 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? -1 -step s2-start-metadata-sync-node-2: +1 +step s2-start-metadata-sync-node-2: SELECT start_metadata_sync_to_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-start-metadata-sync-node-2: <... 
completed> start_metadata_sync_to_node - -step s2-verify-metadata: + +step s2-verify-metadata: SELECT nodeid, groupid, nodename, nodeport FROM pg_dist_node ORDER BY nodeid; SELECT master_run_on_worker( ARRAY['localhost'], ARRAY[57638], ARRAY['SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport) ORDER BY nodeid) FROM pg_dist_node'], false); -nodeid groupid nodename nodeport +nodeid groupid nodename nodeport -26 26 localhost 58637 -27 27 localhost 57638 +26 26 localhost 58637 +27 27 localhost 57638 master_run_on_worker (localhost,57638,t,"[{""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": ""localhost"", ""f4"": 57638}]") -nodeid nodename nodeport +nodeid nodename nodeport diff --git a/src/test/regress/expected/isolation_update_node_lock_writes.out b/src/test/regress/expected/isolation_update_node_lock_writes.out index b9286cfad..dcaa5b991 100644 --- a/src/test/regress/expected/isolation_update_node_lock_writes.out +++ b/src/test/regress/expected/isolation_update_node_lock_writes.out @@ -3,62 +3,62 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update-node-1 s2-begin s2-insert s1-commit s2-abort create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-insert: +step s2-insert: INSERT INTO update_node(id, f1) SELECT id, md5(id::text) FROM generate_series(1, 10) as t(id); -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> error in steps s1-commit s2-insert: ERROR: relation "public.update_node_102008" does not exist -step s2-abort: +step s2-abort: ABORT; -nodeid nodename nodeport +nodeid nodename nodeport starting permutation: s2-begin s2-insert s1-update-node-1 s2-commit create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-insert: +step s2-insert: INSERT INTO update_node(id, f1) SELECT id, md5(id::text) FROM generate_series(1, 10) as t(id); -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-update-node-1: <... completed> -?column? +?column? -1 -nodeid nodename nodeport +1 +nodeid nodename nodeport diff --git a/src/test/regress/expected/isolation_update_vs_all.out b/src/test/regress/expected/isolation_update_vs_all.out index 18490362a..42090fcf2 100644 --- a/src/test/regress/expected/isolation_update_vs_all.out +++ b/src/test/regress/expected/isolation_update_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-update s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -53,17 +53,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -76,12 +76,12 @@ step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -91,9 +91,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -101,12 +101,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; @@ -117,9 +117,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -127,12 +127,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-update s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; @@ -140,9 +140,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY update_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -150,12 +150,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -165,9 +165,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -175,12 +175,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -191,9 +191,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -201,12 +201,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -216,9 +216,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -226,12 +226,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -239,21 +239,21 @@ step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -263,17 +263,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-update s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -285,20 +285,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -308,17 +308,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -328,17 +328,17 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -352,12 +352,12 @@ step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -367,9 +367,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -377,12 +377,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; @@ -393,9 +393,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -403,12 +403,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -418,9 +418,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -428,12 +428,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -444,9 +444,9 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -454,12 +454,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -470,9 +470,9 @@ step s2-update: <... completed> error in steps s1-commit s2-update: ERROR: column "data" of relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -480,34 +480,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size -57344 +57344 step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -517,17 +517,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -536,15 +536,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('update_hash', 'id'); create_distributed_table - + step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -10 +10 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_upsert_vs_all.out b/src/test/regress/expected/isolation_upsert_vs_all.out index 54089a571..9438d39da 100644 --- a/src/test/regress/expected/isolation_upsert_vs_all.out +++ b/src/test/regress/expected/isolation_upsert_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -53,17 +53,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -73,17 +73,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -96,12 +96,12 @@ step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -111,9 +111,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -121,12 +121,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-upsert s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; @@ -137,9 +137,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -147,12 +147,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-upsert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; @@ -160,9 +160,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY upsert_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -170,12 +170,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -185,9 +185,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -195,12 +195,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-upsert s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -211,9 +211,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -221,12 +221,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -236,9 +236,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -246,12 +246,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -259,21 +259,21 @@ step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE step s2-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size -114688 +114688 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -283,17 +283,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-upsert s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE upsert_hash; step s1-create-non-distributed-table: CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -305,20 +305,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -328,17 +328,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -348,17 +348,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -368,17 +368,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -1 +1 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -392,12 +392,12 @@ step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -407,9 +407,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -417,12 +417,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; @@ -433,9 +433,9 @@ step s1-commit: COMMIT; step s2-upsert: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -443,12 +443,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -458,9 +458,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -468,12 +468,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -484,9 +484,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -494,12 +494,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -510,9 +510,9 @@ step s2-upsert: <... 
completed> error in steps s1-commit s2-upsert: ERROR: column "data" of relation "upsert_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -520,34 +520,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size -114688 +114688 step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -557,9 +557,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -1 +1 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_validate_vs_insert.out b/src/test/regress/expected/isolation_validate_vs_insert.out index 212edbedf..7af0e764f 100644 --- a/src/test/regress/expected/isolation_validate_vs_insert.out +++ b/src/test/regress/expected/isolation_validate_vs_insert.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s1-validate s2-insert s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; @@ -16,23 +16,23 @@ step s2-commit: COMMIT; starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s1-validate s2-select s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-validate: ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; step s2-select: SELECT sum(int_data) FROM constrained_table; -sum +sum -7 +7 step s1-commit: COMMIT; step s2-commit: COMMIT; starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s2-insert s1-validate s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; @@ -45,15 +45,15 @@ step s2-commit: COMMIT; starting 
permutation: s1-initialize s1-add-constraint s1-begin s2-begin s2-select s1-validate s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; step s2-begin: BEGIN; step s2-select: SELECT sum(int_data) FROM constrained_table; -sum +sum -7 +7 step s1-validate: ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; step s1-commit: COMMIT; step s2-commit: COMMIT; diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out index e1471536e..de7c758bf 100644 --- a/src/test/regress/expected/limit_intermediate_size.out +++ b/src/test/regress/expected/limit_intermediate_size.out @@ -2,39 +2,39 @@ SET citus.enable_repartition_joins to ON; SET citus.task_executor_type to 'task-tracker'; SET citus.max_intermediate_result_size TO 2; -- should fail because the copy size is ~4kB for each cte -WITH cte AS +WITH cte AS ( SELECT * FROM users_table ), cte2 AS ( SELECT * FROM events_table -) +) SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB) DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. SET citus.max_intermediate_result_size TO 9; -- regular task-tracker CTE should fail -WITH cte AS +WITH cte AS ( - SELECT + SELECT users_table.user_id, users_table.value_1, users_table.value_2 - FROM + FROM users_table - join + join events_table - on + on (users_table.value_3=events_table.value_3) ), cte2 AS ( SELECT * FROM events_table -) -SELECT - cte.user_id, cte2.value_2 -FROM +) +SELECT + cte.user_id, cte2.value_2 +FROM cte JOIN cte2 ON (cte.value_1 = cte2.event_type) -ORDER BY - 1,2 +ORDER BY + 1,2 LIMIT 10; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 9 kB) DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. 
@@ -58,8 +58,8 @@ UNION UNION (select count(*) as c from cte5) ) as foo; - sum ------ + sum +--------------------------------------------------------------------- 91 (1 row) @@ -117,7 +117,7 @@ WITH cte AS ( cte3 AS ( SELECT * FROM events_table WHERE event_type = 1 ) - SELECT * FROM cte2, cte3 WHERE cte2.value_1 IN (SELECT value_2 FROM cte3) + SELECT * FROM cte2, cte3 WHERE cte2.value_1 IN (SELECT value_2 FROM cte3) ) SELECT * FROM cte; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 3 kB) @@ -178,39 +178,39 @@ cte4 AS ( ) SELECT * FROM cte UNION ALL SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | - 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | - 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | - 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | - 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | + 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | + 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | + 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | + 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | (5 rows) -- regular task-tracker CTE, should work since -1 disables the limit -WITH cte AS +WITH cte AS ( - SELECT + SELECT users_table.user_id, users_table.value_1, users_table.value_2 - FROM + FROM users_table - join + join events_table - on + on (users_table.value_2=events_table.value_2) ), cte2 AS ( SELECT * FROM events_table -) -SELECT - cte.user_id, cte2.value_2 -FROM +) +SELECT + cte.user_id, cte2.value_2 +FROM cte JOIN cte2 ON (cte.value_1 = cte2.event_type) -ORDER BY - 1,2 +ORDER BY + 1,2 LIMIT 10; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -224,16 +224,16 @@ LIMIT 10; (10 rows) -- regular real-time CTE fetches around ~4kb data in each subplan -WITH cte AS +WITH cte AS ( SELECT * FROM users_table ), cte2 AS ( SELECT * FROM events_table -) +) SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -247,23 +247,23 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; (10 rows) -- regular real-time query fetches ~4kB -WITH cte AS +WITH cte AS ( SELECT * FROM users_table WHERE user_id IN (1,2,3,4,5) ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 
+--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | (10 rows) -- nested CTEs @@ -274,16 +274,16 @@ WITH cte AS ( cte3 AS ( SELECT * FROM events_table ) - SELECT + SELECT cte2.user_id, cte2.time, cte3.event_type, cte3.value_2, cte3.value_3 - FROM - cte2, cte3 - WHERE + FROM + cte2, cte3 + WHERE cte2.user_id = cte3.user_id AND cte2.user_id = 1 ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; - user_id | time | event_type | value_2 | value_3 ----------+---------------------------------+------------+---------+--------- + user_id | time | event_type | value_2 | value_3 +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 2 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 5 | 1 1 | Wed Nov 22 22:51:43.132261 2017 | 1 | 1 | 1 diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 1a4626b6d..b2116e903 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -6,23 +6,23 @@ SET citus.replication_model TO 'streaming'; SET citus.next_shard_id TO 1470000; CREATE TABLE reference_table (key int PRIMARY KEY); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE distributed_table (key int PRIMARY KEY , value text, age bigint CHECK (age > 10), FOREIGN KEY (key) REFERENCES reference_table(key) ON DELETE CASCADE); SELECT create_distributed_table('distributed_table','key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE second_distributed_table (key int PRIMARY KEY , value text, FOREIGN KEY (key) REFERENCES distributed_table(key) ON DELETE CASCADE); SELECT create_distributed_table('second_distributed_table','key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- ingest some data to enable some tests with data @@ -39,9 +39,9 @@ CREATE TABLE collections_list ( PRIMARY KEY(key, collection_id) ) PARTITION BY LIST (collection_id ); SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE collections_list_0 @@ -79,39 +79,39 @@ $$ LANGUAGE plpgsql; -- distribution key values of 1,6, 500 and 701 are LOCAL to shards, -- we'll use these values in the tests SELECT shard_of_distribution_column_is_local(1); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- t (1 row) SELECT 
shard_of_distribution_column_is_local(6); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(500); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(701); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- t (1 row) -- distribution key values of 11 and 12 are REMOTE to shards SELECT shard_of_distribution_column_is_local(11); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- f (1 row) SELECT shard_of_distribution_column_is_local(12); - shard_of_distribution_column_is_local ---------------------------------------- + shard_of_distribution_column_is_local +--------------------------------------------------------------------- f (1 row) @@ -122,8 +122,8 @@ SET citus.log_local_commands TO ON; -- with simple queries that are not in transcation blocks SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -131,21 +131,21 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e -- because local execution means executing the tasks locally, so the executor -- favors parallel execution even if everyting is local to node SELECT count(*) FROM distributed_table WHERE key IN (1,6); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- queries that hit any remote shards should NOT use local execution SELECT count(*) FROM distributed_table WHERE key IN (1,11); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -179,8 +179,8 @@ WHERE ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470001 distributed_table, local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (((distributed_table.key OPERATOR(pg_catalog.=) 1) AND (distributed_table.key OPERATOR(pg_catalog.=) second_distributed_table.key)) AND ((worker_hash(distributed_table.key) OPERATOR(pg_catalog.>=) '-2147483648'::integer) AND (worker_hash(distributed_table.key) OPERATOR(pg_catalog.<=) '-1073741825'::integer))) ON CONFLICT(key) DO UPDATE SET value = '22'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age 
+--------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -194,8 +194,8 @@ WHERE distributed_table.key != 1 and distributed_table.key=second_distributed_table.key ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) -- INSERT..SELECT via coordinator consists of two steps, select + COPY @@ -208,39 +208,39 @@ INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOT -- EXPLAIN for local execution just works fine -- though going through distributed execution EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table Index Cond: (key = 1) Filter: (age = 20) (8 rows) EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1) Index Cond: (key = 1) Filter: (age = 20) (8 rows) EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on distributed_table_1470001 distributed_table -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table Index Cond: (key = 1) @@ -248,13 +248,13 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; (9 rows) EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on distributed_table_1470001 distributed_table (actual rows=0 loops=1) -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=0 loops=1) Index Cond: (key = 1) @@ -264,14 +264,14 @@ EXPLAIN (ANALYZE, COSTS OFF, 
SUMMARY OFF, TIMING OFF) DELETE FROM distributed_ta -- show that EXPLAIN ANALYZE deleted the row and cascades deletes SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; LOG: executing the command locally: SELECT key, value FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- Put rows back for other tests @@ -294,15 +294,15 @@ COPY second_distributed_table FROM STDIN WITH CSV; BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -310,8 +310,8 @@ ROLLBACK; -- make sure that the value is rollbacked SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -319,8 +319,8 @@ LOG: executing the command locally: SELECT key, value, age FROM local_shard_exe BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -331,8 +331,8 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT count(*) FROM second_distributed_table; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM 
local_shard_execution.second_distributed_table_1470007 second_distributed_table WHERE true - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -340,20 +340,20 @@ ROLLBACK; -- make sure that everything is rollbacked SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 22 | 20 (1 row) SELECT count(*) FROM second_distributed_table; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT * FROM second_distributed_table; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 6 | '6' (2 rows) @@ -364,8 +364,8 @@ BEGIN; -- INSERT is executed locally INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '23' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '23'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -373,8 +373,8 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut -- executed locally and see the changes SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -383,8 +383,8 @@ LOG: executing the command locally: SELECT key, value, age FROM local_shard_exe SELECT * FROM distributed_table WHERE value = '23' ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -397,16 +397,16 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT * FROM distributed_table WHERE value = '23' ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) COMMIT; -- make sure that we've committed 
everything SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) -- if we start with a distributed execution, we should keep @@ -416,8 +416,8 @@ BEGIN; -- although this command could have been executed -- locally, it is not going to be executed locally SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) -- but we can still execute parallel queries, even if @@ -426,8 +426,8 @@ BEGIN; NOTICE: truncate cascades to table "second_distributed_table" -- TRUNCATE cascaded into second_distributed_table SELECT count(*) FROM second_distributed_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -441,8 +441,8 @@ BEGIN; -- this could go through local execution, but doesn't because we've already -- done distributed execution SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -451,8 +451,8 @@ BEGIN; NOTICE: truncate cascades to table "second_distributed_table" -- ensure that TRUNCATE made it SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -468,15 +468,15 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.second_di LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 701) SELECT count(*) FROM distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM second_distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -484,8 +484,8 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e SELECT count(*) FROM distributed_table WHERE key > 700; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -498,22 +498,22 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM 
local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM distributed_table WHERE key = 6; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table WHERE key = 500; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -522,8 +522,8 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -537,8 +537,8 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -552,8 +552,8 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -566,8 +566,8 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -578,8 +578,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.enable_local_executi ROLLBACK; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -593,8 +593,8 @@ HINT: Try re-running the transaction with "SET LOCAL citus.enable_local_executi ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES (11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -606,8 +606,8 @@ HINT: Connect to the coordinator and run it again. 
ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES (11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -686,9 +686,9 @@ WITH local_insert AS (INSERT INTO distributed_table VALUES (1, '11',21) ON CONFL distributed_local_mixed AS (SELECT * FROM reference_table WHERE key IN (SELECT key FROM local_insert)) SELECT * FROM local_insert, distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age -LOG: executing the command locally: SELECT key FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) ANY (SELECT local_insert.key FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint)) local_insert)) - key | value | age | key ------+-------+-----+----- +LOG: executing the command locally: SELECT key FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) ANY (SELECT local_insert.key FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint)) local_insert)) + key | value | age | key +--------------------------------------------------------------------- 1 | 11 | 21 | 1 (1 row) @@ -697,8 +697,8 @@ LOG: executing the command locally: SELECT key FROM local_shard_execution.refer WITH distributed_local_mixed AS (SELECT * FROM distributed_table), local_insert AS (INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *) SELECT * FROM local_insert, distributed_local_mixed ORDER BY 1,2,3,4,5; - key | value | age | key | value | age ------+-------+-----+-----+-------+----- + key | value | age | key | value | age +--------------------------------------------------------------------- 1 | 29 | 21 | 1 | 11 | 21 (1 row) @@ -711,8 +711,8 @@ FROM WHERE distributed_table.key = all_data.key AND distributed_table.key = 1; LOG: executing the command locally: WITH all_data AS (SELECT distributed_table_1.key, distributed_table_1.value, distributed_table_1.age FROM local_shard_execution.distributed_table_1470001 distributed_table_1 WHERE (distributed_table_1.key OPERATOR(pg_catalog.=) 1)) SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table, all_data WHERE ((distributed_table.key OPERATOR(pg_catalog.=) all_data.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -730,8 +730,8 @@ WHERE distributed_table.value = all_data.value AND distributed_table.key = 1 ORDER BY 1 DESC; - key ------ + key +--------------------------------------------------------------------- 1 (1 row) @@ -748,8 +748,8 @@ FROM WHERE distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data); - count -------- + count 
+--------------------------------------------------------------------- 1 (1 row) @@ -763,8 +763,8 @@ FROM distributed_table, all_data WHERE distributed_table.key = all_data.age AND distributed_table.key = 1; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -773,8 +773,8 @@ TRUNCATE reference_table, distributed_table, second_distributed_table; -- local execution of returning of reference tables INSERT INTO reference_table VALUES (1),(2),(3),(4),(5),(6) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 AS citus_table_alias (key) VALUES (1), (2), (3), (4), (5), (6) RETURNING citus_table_alias.key - key ------ + key +--------------------------------------------------------------------- 1 2 3 @@ -786,8 +786,8 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.reference -- local execution of multi-row INSERTs INSERT INTO distributed_table VALUES (1, '11',21), (5,'55',22) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1,'11'::text,'21'::bigint), (5,'55'::text,'22'::bigint) ON CONFLICT(key) DO UPDATE SET value = (((excluded.value)::integer OPERATOR(pg_catalog.+) 1))::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 11 | 21 5 | 55 | 22 (2 rows) @@ -796,8 +796,8 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut -- could have been done via local execution but the executor choose the other way around -- because the command is a multi-shard query INSERT INTO distributed_table VALUES (1, '11',21), (2,'22',22), (3,'33',33), (4,'44',44),(5,'55',55) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 12 | 21 2 | 22 | 22 3 | 33 | 33 @@ -812,86 +812,86 @@ BEGIN; -- 6 local execution without params EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE 
local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- 6 local executions with params EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -899,8 +899,8 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e EXECUTE remote_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -911,86 +911,86 @@ BEGIN; -- 6 local execution without params EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value 
OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? 
+--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_no_param; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) -- 6 local executions with params EXECUTE local_insert_prepare_param(1); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_param(5); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (5, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 5 | 2928 | 22 | 6 | 292830 | 330 (1 row) EXECUTE local_insert_prepare_param(6); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470003 AS citus_table_alias (key, value, age) VALUES (6, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? 
+--------------------------------------------------------------------- 6 | 11 | 21 | 7 | 1130 | 315 (1 row) EXECUTE local_insert_prepare_param(1); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 1 | 2928 | 21 | 2 | 292830 | 315 (1 row) EXECUTE local_insert_prepare_param(5); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (5, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 5 | 2928 | 22 | 6 | 292830 | 330 (1 row) EXECUTE local_insert_prepare_param(6); LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470003 AS citus_table_alias (key, value, age) VALUES (6, '11'::text, '21'::bigint) ON CONFLICT(key) DO UPDATE SET value = '2928'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age, (citus_table_alias.key OPERATOR(pg_catalog.+) 1), (citus_table_alias.value OPERATOR(pg_catalog.||) '30'::text), (citus_table_alias.age OPERATOR(pg_catalog.*) 15) - key | value | age | ?column? | ?column? | ?column? ------+-------+-----+----------+----------+---------- + key | value | age | ?column? | ?column? | ?column? 
+--------------------------------------------------------------------- 6 | 2928 | 21 | 7 | 292830 | 315 (1 row) @@ -998,8 +998,8 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut EXECUTE remote_prepare_param(2); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 2) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 2) - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -1068,8 +1068,8 @@ ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -1082,23 +1082,23 @@ ERROR: division by zero ROLLBACK; -- we've rollbacked everything SELECT count(*) FROM distributed_table WHERE value = '200'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- RETURNING should just work fine for reference tables INSERT INTO reference_table VALUES (500) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (500) RETURNING key - key ------ + key +--------------------------------------------------------------------- 500 (1 row) DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key - key ------ + key +--------------------------------------------------------------------- 500 (1 row) @@ -1107,8 +1107,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO sequential ; DELETE FROM distributed_table; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -1118,8 +1118,8 @@ BEGIN; SET citus.multi_shard_modify_mode TO sequential ; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -1145,8 +1145,8 @@ ROLLBACK; BEGIN; DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key - key ------ + key 
+--------------------------------------------------------------------- 500 (1 row) @@ -1169,8 +1169,8 @@ BEGIN; SET LOCAL citus.task_executor_type = 'task-tracker'; SET LOCAL client_min_messages TO INFO; SELECT count(*) FROM distributed_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -1183,8 +1183,8 @@ CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; SELECT * FROM v_local_query_execution; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1194,8 +1194,8 @@ CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; SELECT * FROM v_local_query_execution_2 WHERE key = 500; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) - key | value | age ------+-------+----- + key | value | age +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1204,8 +1204,8 @@ LOG: executing the command locally: SELECT key, value, age FROM (SELECT distrib BEGIN; SAVEPOINT my_savepoint; SELECT count(*) FROM distributed_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -1222,8 +1222,8 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT count(*) FROM distributed_table; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true - count -------- + count +--------------------------------------------------------------------- 100 (1 row) @@ -1234,9 +1234,9 @@ COMMIT; -- sanity check: local execution on partitions INSERT INTO collections_list (collection_id) VALUES (0) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('3940649673949185'::bigint, '3940649673949185'::bigint, 0) RETURNING key, ser, ts, collection_id, value - key | ser | ts | collection_id | value -------------------+------------------+----+---------------+------- - 3940649673949185 | 3940649673949185 | | 0 | + key | ser | ts | collection_id | value +--------------------------------------------------------------------- + 3940649673949185 | 3940649673949185 | | 0 | (1 row) BEGIN; @@ -1244,26 +1244,26 @@ BEGIN; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470009 (key, ser, collection_id) VALUES ('1'::bigint, '3940649673949186'::bigint, 0) SELECT count(*) FROM collections_list_0 WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_0_1470013 collections_list_0 WHERE (key OPERATOR(pg_catalog.=) 1) - count -------- + count 
+--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM collections_list; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470011 collections_list WHERE true - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT * FROM collections_list ORDER BY 1,2,3,4; LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470011 collections_list WHERE true - key | ser | ts | collection_id | value -------------------+------------------+----+---------------+------- - 1 | 3940649673949186 | | 0 | - 3940649673949185 | 3940649673949185 | | 0 | + key | ser | ts | collection_id | value +--------------------------------------------------------------------- + 1 | 3940649673949186 | | 0 | + 3940649673949185 | 3940649673949185 | | 0 | (2 rows) COMMIT; @@ -1273,93 +1273,93 @@ TRUNCATE collections_list; ALTER SEQUENCE collections_list_key_seq NO MINVALUE NO MAXVALUE; PREPARE serial_prepared_local AS INSERT INTO collections_list (collection_id) VALUES (0) RETURNING key, ser; SELECT setval('collections_list_key_seq', 4); - setval --------- + setval +--------------------------------------------------------------------- 4 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470009 (key, ser, collection_id) VALUES ('5'::bigint, '3940649673949187'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 5 | 3940649673949187 (1 row) SELECT setval('collections_list_key_seq', 5); - setval --------- + setval +--------------------------------------------------------------------- 5 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('6'::bigint, '3940649673949188'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 6 | 3940649673949188 (1 row) SELECT setval('collections_list_key_seq', 499); - setval --------- + setval +--------------------------------------------------------------------- 499 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('500'::bigint, '3940649673949189'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 500 | 3940649673949189 (1 row) SELECT setval('collections_list_key_seq', 700); - setval --------- + setval +--------------------------------------------------------------------- 700 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470009 (key, ser, collection_id) VALUES ('701'::bigint, '3940649673949190'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser 
+--------------------------------------------------------------------- 701 | 3940649673949190 (1 row) SELECT setval('collections_list_key_seq', 708); - setval --------- + setval +--------------------------------------------------------------------- 708 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('709'::bigint, '3940649673949191'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 709 | 3940649673949191 (1 row) SELECT setval('collections_list_key_seq', 709); - setval --------- + setval +--------------------------------------------------------------------- 709 (1 row) EXECUTE serial_prepared_local; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470009 (key, ser, collection_id) VALUES ('710'::bigint, '3940649673949192'::bigint, 0) RETURNING key, ser - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 710 | 3940649673949192 (1 row) -- and, one remote test SELECT setval('collections_list_key_seq', 10); - setval --------- + setval +--------------------------------------------------------------------- 10 (1 row) EXECUTE serial_prepared_local; - key | ser ------+------------------ + key | ser +--------------------------------------------------------------------- 11 | 3940649673949193 (1 row) @@ -1368,8 +1368,8 @@ EXECUTE serial_prepared_local; -- Citus currently doesn't allow using task_assignment_policy for intermediate results WITH distributed_local_mixed AS (INSERT INTO reference_table VALUES (1000) RETURNING *) SELECT * FROM distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (1000) RETURNING key - key ------- + key +--------------------------------------------------------------------- 1000 (1 row) @@ -1388,8 +1388,8 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut DELETE FROM distributed_table RETURNING key; LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470001 distributed_table RETURNING key LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table RETURNING key - key ------ + key +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1408,8 +1408,8 @@ BEGIN; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (1) DELETE FROM reference_table RETURNING key; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table RETURNING key - key ------ + key +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1427,9 +1427,9 @@ CREATE TABLE event_responses ( primary key (event_id, user_id) ); SELECT create_distributed_table('event_responses', 'event_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE PROCEDURE register_for_event(p_event_id int, p_user_id int, p_choice invite_resp) @@ -1441,9 +1441,9 @@ BEGIN END; $fn$; SELECT create_distributed_function('register_for_event(int,int,invite_resp)', 'p_event_id', 'event_responses'); - create_distributed_function 
------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- call 6 times to make sure it works after the 5th time(postgres binds values after the 5th time) diff --git a/src/test/regress/expected/materialized_view.out b/src/test/regress/expected/materialized_view.out index 4be1c304e..b8cb80529 100644 --- a/src/test/regress/expected/materialized_view.out +++ b/src/test/regress/expected/materialized_view.out @@ -1,6 +1,6 @@ ---- +--------------------------------------------------------------------- --- materialized_view ---- +--------------------------------------------------------------------- -- This file contains test cases for materialized view support. -- materialized views work -- insert into... select works with views @@ -9,23 +9,23 @@ SET search_path TO materialized_view, public; CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; CREATE TABLE temp_lineitem(LIKE lineitem_hash_part); SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT count(*) FROM temp_lineitem; - count -------- + count +--------------------------------------------------------------------- 1706 (1 row) -- following is a where false query, should not be inserting anything INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL'; SELECT count(*) FROM temp_lineitem; - count -------- + count +--------------------------------------------------------------------- 1706 (1 row) @@ -33,8 +33,8 @@ SELECT count(*) FROM temp_lineitem; CREATE MATERIALIZED VIEW mode_counts AS SELECT l_shipmode, count(*) FROM temp_lineitem GROUP BY l_shipmode; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- AIR | 1706 (1 row) @@ -44,16 +44,16 @@ ERROR: relation mode_counts is not distributed -- new data is not immediately reflected in the view INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- AIR | 1706 (1 row) -- refresh updates the materialised view with new data REFRESH MATERIALIZED VIEW mode_counts; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- AIR | 3412 (1 row) @@ -66,8 +66,8 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey AND lineitem_hash_part.l_orderkey=3; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -79,8 +79,8 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT 
count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -93,8 +93,8 @@ FROM lineitem_hash_part, orders_hash_part, total_price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -110,8 +110,8 @@ FROM orders_hash_part JOIN ( ON total_quantity.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) @@ -123,8 +123,8 @@ FROM lineitem_hash_part, orders_reference, (SELECT SUM(o_totalprice) AS price_su WHERE lineitem_hash_part.l_orderkey=orders_reference.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -138,22 +138,22 @@ FROM lineitem_local_to_hash_part, orders_local_to_hash_part, (SELECT SUM(l_exten WHERE lineitem_local_to_hash_part.l_orderkey=orders_local_to_hash_part.o_orderkey; SELECT create_distributed_table('lineitem_local_to_hash_part', 'l_orderkey'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('orders_local_to_hash_part', 'o_orderkey'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -167,8 +167,8 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view WITH DATA; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -192,8 +192,8 @@ GROUP BY orders_hash_part.o_orderdate; CREATE UNIQUE INDEX materialized_view_index ON materialized_view (o_orderdate); REFRESH MATERIALIZED VIEW CONCURRENTLY materialized_view; SELECT count(*) FROM materialized_view; - count -------- + count +--------------------------------------------------------------------- 1699 (1 row) @@ -205,15 +205,15 @@ NOTICE: drop cascades to view air_shipped_lineitems CREATE TABLE large (id int, tenant_id int); CREATE TABLE small (id int, tenant_id int); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy small FROM STDIN DELIMITER ',' @@ -226,8 +226,8 @@ ERROR: cannot change materialized view "small_view" UPDATE large SET id=20 FROM small_view 
WHERE small_view.id=large.id; ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -238,8 +238,8 @@ SELECT * FROM large ORDER BY 1, 2; UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=2 and large.tenant_id=2; ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -249,8 +249,8 @@ SELECT * FROM large ORDER BY 1, 2; -- delete statement on large with subquery, this should succeed DELETE FROM large WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 6 | 5 (1 row) @@ -267,15 +267,15 @@ CREATE TABLE large_partitioned_p1 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM (10) TO (20); CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy small FROM STDIN DELIMITER ',' @@ -290,8 +290,8 @@ ERROR: cannot change materialized view "small_view" UPDATE large_partitioned SET id=20 FROM small_view WHERE small_view.id=large_partitioned.id; ERROR: materialized views in modify queries are not supported SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -304,8 +304,8 @@ SELECT * FROM large_partitioned ORDER BY 1, 2; -- delete statement on large_partitioned DELETE FROM large_partitioned WHERE id in (SELECT id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 2 | 3 5 | 4 26 | 32 @@ -321,14 +321,14 @@ WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large_partitioned WHERE id in (SELECT * FROM all_small_view_ids); -- make sure that materialized view in a CTE/subquery can be joined with a distributed table WITH cte AS (SELECT *, random() FROM small_view) SELECT count(*) FROM cte JOIN small USING(id); - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM (SELECT *, random() FROM small_view) as subquery JOIN small USING(id); - count -------- + count +--------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out index 8c6204b72..61c8b568f 100644 --- a/src/test/regress/expected/multi_703_upgrade.out +++ b/src/test/regress/expected/multi_703_upgrade.out @@ -8,15 +8,15 @@ INSERT INTO 
pg_dist_shard_placement (1, 1, 1, 0, 'localhost', :worker_1_port); -- if there are no worker nodes which match the shards this should fail ALTER EXTENSION citus UPDATE TO '7.0-3'; -ERROR: There is no node at "localhost:57637" +ERROR: There is no node at "localhost:xxxxx" CONTEXT: PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE -- if you add a matching worker the upgrade should succeed INSERT INTO pg_dist_node (nodename, nodeport, groupid) VALUES ('localhost', :worker_1_port, 1); ALTER EXTENSION citus UPDATE TO '7.0-3'; SELECT * FROM pg_dist_placement; - placementid | shardid | shardstate | shardlength | groupid --------------+---------+------------+-------------+--------- + placementid | shardid | shardstate | shardlength | groupid +--------------------------------------------------------------------- 1 | 1 | 1 | 0 | 1 (1 row) diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out index e17346e74..2831d7a7a 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct.out @@ -11,67 +11,67 @@ WHERE name = 'hll' :create_cmd; -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) -- Check approximate count(distinct) at different precisions / error rates SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2612 (1 row) SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2967 (1 row) -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 11654 (1 row) SELECT count(distinct l_extendedprice) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 11691 (1 row) SELECT count(distinct l_shipdate) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2483 (1 row) SELECT count(distinct l_comment) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 11788 (1 row) -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2980 (1 row) SELECT count(distinct extract(month from l_shipdate)) AS my_month FROM lineitem; - my_month ----------- + my_month +--------------------------------------------------------------------- 12 (1 row) SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 3 (1 row) @@ -79,15 +79,15 @@ SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; -- contain different filter, join, sort and limit clauses SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count -------- + count +--------------------------------------------------------------------- 2355 (1 row) SELECT count(DISTINCT l_orderkey) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 835 (1 row) @@ -96,8 +96,8 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei GROUP BY l_quantity ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; - distinct_order_count | l_quantity -----------------------+------------ + distinct_order_count | l_quantity +--------------------------------------------------------------------- 210 | 29.00 216 | 13.00 217 | 16.00 @@ -122,25 +122,25 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) -- test with search_path is set SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -160,8 +160,8 @@ SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as t GROUP BY l_returnflag ORDER BY total LIMIT 10; - l_returnflag | count_distinct | total ---------------+----------------+------- + l_returnflag | count_distinct | total +--------------------------------------------------------------------- R | 1103 | 2901 A | 1108 | 2944 N | 1265 | 6155 @@ -176,8 +176,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count | count | count -------------+-------+-------+------- + l_orderkey | count | count | count +--------------------------------------------------------------------- 12005 | 4 | 4 | 4 5409 | 4 | 4 | 4 4964 | 4 | 4 | 4 @@ -193,8 +193,8 @@ SELECT -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) diff --git a/src/test/regress/expected/multi_agg_approximate_distinct_0.out b/src/test/regress/expected/multi_agg_approximate_distinct_0.out index b714968d8..43fb5dfc0 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct_0.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct_0.out @@ -9,15 +9,15 @@ AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; - hll_present -------------- + hll_present +--------------------------------------------------------------------- f (1 
row) -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) @@ -82,9 +82,9 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; @@ -102,14 +102,14 @@ SET search_path TO public; -- If we have an order by on count(distinct) that we intend to push down to -- worker nodes, we need to error out. Otherwise, we are fine. SET citus.limit_clause_row_fetch_count = 1000; -SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total +SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY count_distinct LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total +SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY total @@ -130,8 +130,8 @@ HINT: You need to have the hll extension loaded. -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out index 3458427a7..da3dcccf2 100644 --- a/src/test/regress/expected/multi_alter_table_add_constraints.out +++ b/src/test/regress/expected/multi_alter_table_add_constraints.out @@ -13,9 +13,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add primary key constraint on distribution column (or group of columns @@ -31,14 +31,14 @@ INSERT INTO products VALUES(1, 'product_1', 1); INSERT INTO products VALUES(1, 'product_1', 1); ERROR: duplicate key value violates unique constraint "p_key_1450001" DETAIL: Key (product_no)=(1) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx ALTER TABLE products DROP CONSTRAINT p_key; INSERT INTO products VALUES(1, 'product_1', 1); -- Can not create constraint since it conflicts with the existing data ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); ERROR: could not create unique index "p_key_1450001" DETAIL: Key (product_no)=(1) is duplicated. 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products; -- Check "PRIMARY KEY CONSTRAINT" with reference table CREATE TABLE products_ref ( @@ -47,9 +47,9 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- Can add PRIMARY KEY to any column @@ -62,7 +62,7 @@ INSERT INTO products_ref VALUES(1, 'product_1', 1); INSERT INTO products_ref VALUES(1, 'product_1', 1); ERROR: duplicate key value violates unique constraint "p_key_1450032" DETAIL: Key (product_no)=(1) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products_ref; -- Check "PRIMARY KEY CONSTRAINT" on append table CREATE TABLE products_append ( @@ -71,9 +71,9 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add primary key constraint on distribution column (or group @@ -97,9 +97,9 @@ DROP TABLE products_append; -- Check "UNIQUE CONSTRAINT" CREATE TABLE unique_test_table(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add unique constraint on distribution column (or group @@ -113,16 +113,16 @@ ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); INSERT INTO unique_test_table VALUES(1, 'Ahmet'); INSERT INTO unique_test_table VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_1450035" -DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57638 +DETAIL: Key (id)=(X) already exists. +CONTEXT: while executing command on localhost:xxxxx ALTER TABLE unique_test_table DROP CONSTRAINT unn_id; -- Insert row which will conflict with the next unique constraint command INSERT INTO unique_test_table VALUES(1, 'Mehmet'); -- Can not create constraint since it conflicts with the existing data ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); ERROR: could not create unique index "unn_id_1450035" -DETAIL: Key (id)=(1) is duplicated. -CONTEXT: while executing command on localhost:57637 +DETAIL: Key (id)=(X) is duplicated. +CONTEXT: while executing command on localhost:xxxxx -- Can create unique constraint over multiple columns which must include -- distribution column ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name); @@ -130,26 +130,26 @@ ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name); INSERT INTO unique_test_table VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_name_1450035" DETAIL: Key (id, name)=(1, Mehmet) already exists. 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE unique_test_table; -- Check "UNIQUE CONSTRAINT" with reference table CREATE TABLE unique_test_table_ref(id int, name varchar(20)); SELECT create_reference_table('unique_test_table_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- We can add unique constraint on any column with reference tables ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id UNIQUE(id); --- Error out. Since the table can not have two rows with the same id. +-- Error out. Since the table can not have two rows with the same id. INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet'); INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_1450066" -DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57637 --- We can add unique constraint with multiple columns +DETAIL: Key (id)=(X) already exists. +CONTEXT: while executing command on localhost:xxxxx +-- We can add unique constraint with multiple columns ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id; ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name); -- Error out, since two rows can not have the same id or name. @@ -158,9 +158,9 @@ DROP TABLE unique_test_table_ref; -- Check "UNIQUE CONSTRAINT" with append table CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add unique constraint on distribution column (or group @@ -179,7 +179,7 @@ HINT: Consider using hash partitioning. -- Error out. Table can not have two rows with the same id. \COPY unique_test_table_append FROM STDIN DELIMITER AS ','; ERROR: duplicate key value violates unique constraint "unn_id_1450067" -DETAIL: Key (id)=(1) already exists. +DETAIL: Key (id)=(X) already exists. DROP TABLE unique_test_table_append; -- Check "CHECK CONSTRAINT" CREATE TABLE products ( @@ -189,27 +189,27 @@ CREATE TABLE products ( discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can add column and table check constraints ALTER TABLE products ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); --- First and third queries will error out, because of conflicts with p_check and +-- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_1450069" violates check constraint "p_check_1450069" DETAIL: Failing row contains (1, product_1, -1, -2). 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO products VALUES(1, 'product_1', 5, 3); INSERT INTO products VALUES(1, 'product_1', 2, 3); ERROR: new row for relation "products_1450069" violates check constraint "p_multi_check_1450069" DETAIL: Failing row contains (1, product_1, 2, 3). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products; --- Check "CHECK CONSTRAINT" with reference table +-- Check "CHECK CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, @@ -217,25 +217,25 @@ CREATE TABLE products_ref ( discounted_price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- Can add column and table check constraints ALTER TABLE products_ref ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); --- First and third queries will error out, because of conflicts with p_check and +-- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products_ref VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_ref_1450100" violates check constraint "p_check_1450100" DETAIL: Failing row contains (1, product_1, -1, -2). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO products_ref VALUES(1, 'product_1', 5, 3); INSERT INTO products_ref VALUES(1, 'product_1', 2, 3); ERROR: new row for relation "products_ref_1450100" violates check constraint "p_multi_check_1450100" DETAIL: Failing row contains (1, product_1, 2, 3). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products_ref; -- Check "CHECK CONSTRAINT" with append table CREATE TABLE products_append ( @@ -245,9 +245,9 @@ CREATE TABLE products_append ( discounted_price int ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can add column and table check constraints @@ -265,18 +265,18 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Can only add exclusion constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column -ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); ERROR: cannot create constraint on "products" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
--- We can add composite exclusion +-- We can add composite exclusion ALTER TABLE products ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- 4th command will error out since it conflicts with exc_pno_name constraint INSERT INTO products VALUES(1,'product_1', 5); @@ -285,7 +285,7 @@ INSERT INTO products VALUES(2,'product_2', 5); INSERT INTO products VALUES(2,'product_2', 5); ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450126" DETAIL: Key (product_no, name)=(2, product_2) conflicts with existing key (product_no, name)=(2, product_2). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products; -- Check "EXCLUSION CONSTRAINT" with reference table CREATE TABLE products_ref ( @@ -294,13 +294,13 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- We can add exclusion constraint on any column -ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); -- We can add composite exclusion because none of pair of rows are conflicting ALTER TABLE products_ref ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Third insertion will error out, since it has the same name with second insertion @@ -309,7 +309,7 @@ INSERT INTO products_ref VALUES(1,'product_2', 10); INSERT INTO products_ref VALUES(2,'product_2', 5); ERROR: conflicting key value violates exclusion constraint "exc_name_1450134" DETAIL: Key (name)=(product_2) conflicts with existing key (name)=(product_2). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx DROP TABLE products_ref; -- Check "EXCLUSION CONSTRAINT" with append table CREATE TABLE products_append ( @@ -318,15 +318,15 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no','append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- Can only add exclusion constraint on distribution column (or group of column +-- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) -- Command below should error out since 'name' is not a distribution column -ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. 
@@ -348,9 +348,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE products ALTER COLUMN name SET NOT NULL; @@ -358,7 +358,7 @@ ALTER TABLE products ALTER COLUMN name SET NOT NULL; INSERT INTO products VALUES(1,NULL,5); ERROR: null value in column "name" violates not-null constraint DETAIL: Failing row contains (1, null, 5). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO products VALUES(NULL,'product_1', 5); ERROR: cannot perform an INSERT with NULL in the partition column DROP TABLE products; @@ -369,9 +369,9 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL; @@ -379,7 +379,7 @@ ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL; INSERT INTO products_ref VALUES(1,NULL,5); ERROR: null value in column "name" violates not-null constraint DETAIL: Failing row contains (1, null, 5). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO products_ref VALUES(NULL,'product_1', 5); DROP TABLE products_ref; -- Check "NOT NULL" with append table @@ -389,9 +389,9 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; @@ -405,9 +405,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Should error out since add constraint is not the single subcommand @@ -415,7 +415,7 @@ ALTER TABLE products ADD CONSTRAINT unn_1 UNIQUE(product_no, price), ADD CONSTRA ERROR: cannot execute ADD CONSTRAINT command with other subcommands HINT: You can issue each subcommand separately -- Tests for constraints without name --- Commands below should error out since constraints do not have the name +-- Commands below should error out since constraints do not have the name ALTER TABLE products ADD UNIQUE(product_no); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE products ADD PRIMARY KEY(product_no); @@ -430,7 +430,7 @@ ALTER TABLE products ADD CONSTRAINT uniq_product_no EXCLUDE USING btree (product ALTER TABLE products DROP CONSTRAINT nonzero_product_no; ALTER TABLE products DROP CONSTRAINT uniq_product_no; DROP TABLE products; --- Tests with transactions +-- Tests with transactions CREATE TABLE products ( product_no integer, name text, @@ -438,9 +438,9 @@ CREATE TABLE products ( discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -456,16 +456,16 @@ ALTER TABLE 
products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); INSERT INTO products VALUES(1,'product_1', 10, 8); ROLLBACK; --- There should be no constraint on master and worker(s) +-- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; - Constraint | Definition -------------+------------ + Constraint | Definition +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; - Constraint | Definition -------------+------------ + Constraint | Definition +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -476,16 +476,16 @@ ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); ROLLBACK; --- There should be no constraint on master and worker(s) +-- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; - Constraint | Definition -------------+------------ + Constraint | Definition +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; - Constraint | Definition -------------+------------ + Constraint | Definition +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -497,27 +497,27 @@ CREATE TABLE sc1.alter_add_prim_key(x int, y int); CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON sc1.alter_add_prim_key(x); ALTER TABLE sc1.alter_add_prim_key ADD CONSTRAINT alter_pk_idx PRIMARY KEY USING INDEX alter_pk_idx; SELECT create_distributed_table('sc1.alter_add_prim_key', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE - kc.table_schema = 'sc1' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + kc.table_schema = 'sc1' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result ------------+----------+---------+---------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450234 localhost | 57638 | t | alter_pk_idx_1450234 (2 rows) @@ -526,29 +526,29 @@ CREATE SCHEMA sc2; CREATE TABLE sc2.alter_add_prim_key(x int, y int); SET search_path TO 'sc2'; SELECT create_distributed_table('alter_add_prim_key', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE UNIQUE INDEX CONCURRENTLY 
alter_pk_idx ON alter_add_prim_key(x); ALTER TABLE alter_add_prim_key ADD CONSTRAINT alter_pk_idx PRIMARY KEY USING INDEX alter_pk_idx; SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc2' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result ------------+----------+---------+---------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450236 localhost | 57638 | t | alter_pk_idx_1450236 (2 rows) @@ -560,53 +560,53 @@ INSERT INTO sc3.alter_add_prim_key(x) SELECT generate_series(1,100); SET search_path TO 'sc3'; SELECT create_distributed_table('alter_add_prim_key', 'x'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON alter_add_prim_key(x); ALTER TABLE alter_add_prim_key ADD CONSTRAINT a_constraint PRIMARY KEY USING INDEX alter_pk_idx; NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "alter_pk_idx" to "a_constraint" SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc3' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result ------------+----------+---------+---------------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | a_constraint_1450238 localhost | 57638 | t | a_constraint_1450238 (2 rows) ALTER TABLE alter_add_prim_key DROP CONSTRAINT a_constraint; SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc3' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result ------------+----------+---------+-------- - localhost | 57637 | t | - localhost | 57638 | t | + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | + localhost | 57638 | t | (2 rows) SET search_path TO 'public'; diff --git a/src/test/regress/expected/multi_array_agg.out b/src/test/regress/expected/multi_array_agg.out index ad1e00aa2..dd673e11d 100644 --- a/src/test/regress/expected/multi_array_agg.out +++ 
b/src/test/regress/expected/multi_array_agg.out @@ -9,8 +9,8 @@ SELECT ARRAY(SELECT unnest($1) ORDER BY 1) $$; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); - array_cat_agg ---------------- + array_cat_agg +--------------------------------------------------------------------- {1,2,3,4} (1 row) @@ -24,8 +24,8 @@ ERROR: array_agg with order by is unsupported -- Check array_agg() for different data types and LIMIT clauses SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort --------------------------------------------------- + array_sort +--------------------------------------------------------------------- {2132,15635,24027,63700,67310,155190} {106170} {4297,19036,29380,62143,128449,183095} @@ -40,8 +40,8 @@ SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ------------------------------------------------------------------ + array_sort +--------------------------------------------------------------------- {13309.60,21168.23,22824.48,28955.64,45983.16,49620.16} {44694.46} {2618.76,28733.64,32986.52,39890.88,46796.47,54058.05} @@ -56,8 +56,8 @@ SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort --------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- {01-29-1996,01-30-1996,03-13-1996,03-30-1996,04-12-1996,04-21-1996} {01-28-1997} {10-29-1993,11-09-1993,12-04-1993,12-14-1993,01-16-1994,02-02-1994} @@ -72,8 +72,8 @@ SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ----------------------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- {"AIR ","FOB ","MAIL ","MAIL ","REG AIR ","TRUCK "} {"RAIL "} {"AIR ","FOB ","RAIL ","RAIL ","SHIP ","TRUCK "} @@ -88,8 +88,8 @@ SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute array_agg() within other functions SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; - array_length --------------- + array_length +--------------------------------------------------------------------- 12000 (1 row) @@ -100,8 +100,8 @@ SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------- + l_quantity | count | avg | array_sort +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} 2.00 | 19 | 3078.4242105263157895 | 
{5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477} @@ -111,8 +111,8 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderk SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | my_month -------------+------------------------------------------------ + l_quantity | my_month +--------------------------------------------------------------------- 1.00 | {2,3,4,4,4,5,5,5,6,7,7,7,7,9,9,11,11} 2.00 | {1,3,5,5,5,5,6,6,6,7,7,8,10,10,11,11,11,12,12} 3.00 | {3,4,5,6,7,7,8,8,8,9,9,10,11,11} @@ -122,8 +122,8 @@ SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | array_sort -------------+--------------------------------------------- + l_quantity | array_sort +--------------------------------------------------------------------- 1.00 | {11269,11397,11713,11715,11973,18317,18445} 2.00 | {11847,18061,18247,18953} 3.00 | {18249,18315,18699,18951,18955} @@ -133,15 +133,15 @@ SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE -- Check that we can execute array_agg() with an expression containing NULL values SELECT array_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 10; - array_agg --------------------------------------------------------------------------------------------------------------------------------------------------- + array_agg +--------------------------------------------------------------------- {NULL,36.00,NULL,28.00,24.00,32.00,38.00,45.00,49.00,27.00,NULL,28.00,26.00,30.00,NULL,26.00,50.00,37.00,NULL,NULL,46.00,28.00,38.00,35.00,NULL} (1 row) -- Check that we return NULL in case there are no input rows to array_agg() SELECT array_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - array_agg ------------ - + array_agg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_average_expression.out b/src/test/regress/expected/multi_average_expression.out index 74511d553..f03a9ecdd 100644 --- a/src/test/regress/expected/multi_average_expression.out +++ b/src/test/regress/expected/multi_average_expression.out @@ -25,8 +25,8 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus ------------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------+--------------+-------------- + sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus +--------------------------------------------------------------------- 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 | A | F 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 
26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 | N | F 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 | N | O @@ -45,8 +45,8 @@ SELECT end) FROM lineitem; - avg ---------------------- + avg +--------------------------------------------------------------------- 35.3570440077497924 (1 row) @@ -58,8 +58,8 @@ SELECT end) FROM lineitem; - avg ------ - + avg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_basic_queries.out b/src/test/regress/expected/multi_basic_queries.out index 13393acff..ddaf50b60 100644 --- a/src/test/regress/expected/multi_basic_queries.out +++ b/src/test/regress/expected/multi_basic_queries.out @@ -4,20 +4,20 @@ -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. SELECT count(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT sum(l_extendedprice) FROM lineitem; - sum --------------- + sum +--------------------------------------------------------------------- 457702024.50 (1 row) SELECT avg(l_extendedprice) FROM lineitem; - avg --------------------- + avg +--------------------------------------------------------------------- 38141.835375000000 (1 row) @@ -25,16 +25,16 @@ SELECT avg(l_extendedprice) FROM lineitem; BEGIN; SET TRANSACTION READ ONLY; SELECT count(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) COMMIT; -- Verify temp tables which are used for final result aggregation don't persist. SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_basics.out b/src/test/regress/expected/multi_behavioral_analytics_basics.out index 45ee0c27a..046eb4eeb 100644 --- a/src/test/regress/expected/multi_behavioral_analytics_basics.out +++ b/src/test/regress/expected/multi_behavioral_analytics_basics.out @@ -1,8 +1,8 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( @@ -20,16 +20,16 @@ FROM ( ) q; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event ------------------------------------- 
------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( @@ -69,16 +69,16 @@ FROM ( ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 4 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg, value_2_agg) SELECT user_id, @@ -146,18 +146,18 @@ ORDER BY count_pay; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 7 | 3 | 1.7142857142857143 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT @@ -187,16 +187,16 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 3 | 3 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results (user_id) SELECT DISTINCT user_id @@ -206,16 +206,16 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND user_id IN (SELECT user_id 
FROM users_table WHERE value_1 >= 5 AND value_1 <= 6); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 5 | 5 | 3.8000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in at least two of X and Y and Z segments ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) SELECT user_id @@ -227,16 +227,16 @@ GROUP BY user_id HAVING count(distinct value_1) >= 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -245,16 +245,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type>1 AND event_type < 5 AND value_3 > 2 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 20 | 6 | 3.7500000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -263,16 +263,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=1 AND value_3 > 4 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), 
avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 4 | 2 | 4.2500000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -282,16 +282,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type=2 AND value_3 > 1 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 29 | 5 | 3.1034482758620690 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -300,16 +300,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 11 | 1 | 5.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, @@ -328,16 +328,16 @@ INSERT INTO agg_results(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg 
+--------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who logged in more than once ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, value_1 from @@ -347,16 +347,16 @@ SELECT user_id, value_1 from ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+------------------------ + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) Select user_id @@ -370,16 +370,16 @@ And user_id in And value_2 > 1); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 11 | 4 | 3.1818181818181818 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, event_type FROM events_table @@ -387,16 +387,16 @@ WHERE user_id in (SELECT user_id from events_table WHERE event_type > 3 and even GROUP BY user_id, event_type; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 34 | 6 | 3.4411764705882353 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all the users_table who has done some event more than three times ------------------------------------- 
------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) select user_id from @@ -409,16 +409,16 @@ where event_type = 4 group by user_id having count(*) > 3 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 4 | 4 | 2.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg, value_3_agg) SELECT @@ -437,8 +437,8 @@ FROM WHERE users_table.value_1 < 3; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 3488 | 6 | 3.5372706422018349 (1 row) @@ -461,8 +461,8 @@ FROM WHERE temp.value_1 < 3; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -485,8 +485,8 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; - count | count | avg | avg --------+-------+--------------------+------------------------ + count | count | avg | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) @@ -508,8 +508,8 @@ FROM ON users_ids.user_id = temp.user_id ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; - count | count | avg | avg --------+-------+--------------------+------------------------ + count | count | avg | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out index 684bbe02b..3f3025267 100644 --- a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out +++ b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out @@ -1,8 +1,8 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query -- single 
shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -22,16 +22,16 @@ FROM ( WHERE user_id = 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -51,16 +51,16 @@ FROM ( WHERE (user_id = 1 OR user_id = 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- single shard query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) @@ -100,11 +100,11 @@ FROM ( WHERE t1.user_id = 2 GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- two shards query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) @@ -144,18 +144,18 @@ FROM ( ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg 
--------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table -- single shard query ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT @@ -186,18 +186,18 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 5.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table -- two shards query ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT @@ -229,16 +229,16 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 2 | 2 | 3.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id @@ -249,16 +249,16 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND user_id = 1; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+------------------------ + count | count | avg 
+--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id @@ -269,16 +269,16 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND (user_id = 1 OR user_id = 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+------------------------ + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -288,16 +288,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND user_id = 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 7 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -307,16 +307,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND (user_id = 2 OR user_id = 1); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 10 | 2 | 1.7000000000000000 (1 row) ------------------------------------- ------------------------------------- 
+--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -326,16 +326,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+------------------------ + count | count | avg +--------------------------------------------------------------------- 6 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -345,16 +345,16 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id AND (user_id = 1 OR user_id = 2)); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 20 | 2 | 1.7000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, @@ -375,16 +375,16 @@ INSERT INTO agg_results_second(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 2 | 1 | 3.0000000000000000 (1 row) 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, @@ -404,8 +404,8 @@ INSERT INTO agg_results_second(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) diff --git a/src/test/regress/expected/multi_binary_master_copy_format.out b/src/test/regress/expected/multi_binary_master_copy_format.out index 8ff28f867..3d506408e 100644 --- a/src/test/regress/expected/multi_binary_master_copy_format.out +++ b/src/test/regress/expected/multi_binary_master_copy_format.out @@ -6,29 +6,29 @@ SET citus.next_shard_id TO 430000; SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; - l_shipmode ------------- - TRUCK - MAIL + l_shipmode +--------------------------------------------------------------------- + TRUCK + MAIL (2 rows) RESET citus.task_executor_type; SELECT count(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; - l_shipmode ------------- - TRUCK - MAIL + l_shipmode +--------------------------------------------------------------------- + TRUCK + MAIL (2 rows) diff --git a/src/test/regress/expected/multi_cache_invalidation.out b/src/test/regress/expected/multi_cache_invalidation.out index de1d80a54..285275ae2 100644 --- a/src/test/regress/expected/multi_cache_invalidation.out +++ b/src/test/regress/expected/multi_cache_invalidation.out @@ -6,22 +6,22 @@ SET citus.next_shard_id TO 1601000; CREATE TABLE mci_1.test (test_id integer NOT NULL, data int); CREATE TABLE mci_2.test (test_id integer NOT NULL, data int); SELECT create_distributed_table('mci_1.test', 'test_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('mci_2.test', 'test_id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO mci_1.test VALUES (1,2), (3,4); -- move shards into other append-distributed table SELECT run_command_on_placements('mci_1.test', 'ALTER TABLE %s SET SCHEMA mci_2'); - run_command_on_placements -------------------------------------------- + run_command_on_placements 
+--------------------------------------------------------------------- (localhost,57637,1601000,t,"ALTER TABLE") (localhost,57638,1601000,t,"ALTER TABLE") (localhost,57637,1601001,t,"ALTER TABLE") @@ -36,8 +36,8 @@ UPDATE pg_dist_shard SET logicalrelid = 'mci_2.test'::regclass, shardminvalue = NULL, shardmaxvalue = NULL WHERE logicalrelid = 'mci_1.test'::regclass; SELECT * FROM mci_2.test ORDER BY test_id; - test_id | data ----------+------ + test_id | data +--------------------------------------------------------------------- 1 | 2 3 | 4 (2 rows) diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index 53d4b986a..a1c073530 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -14,17 +14,17 @@ SET client_min_messages to ERROR; SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------ - localhost | 666 | f | failed to connect to localhost:666 + node_name | node_port | success | result +--------------------------------------------------------------------- + localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------ - localhost | 666 | f | failed to connect to localhost:666 + node_name | node_port | success | result +--------------------------------------------------------------------- + localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) RESET client_min_messages; @@ -37,8 +37,8 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -47,8 +47,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -56,8 +56,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -67,8 +67,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 
ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) @@ -79,8 +79,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) @@ -91,8 +91,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) @@ -103,8 +103,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) @@ -113,16 +113,16 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -130,16 +130,16 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); - node_name | node_port 
| success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -147,8 +147,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -162,8 +162,8 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+-------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -171,16 +171,16 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -188,8 +188,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) @@ -200,8 +200,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -210,8 +210,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------------ + node_name | node_port | success | result 
+--------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -219,8 +219,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -230,8 +230,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) @@ -242,8 +242,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) @@ -254,8 +254,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) @@ -266,8 +266,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) @@ -282,16 +282,16 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result 
+--------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -299,16 +299,16 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -316,8 +316,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -325,8 +325,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+-------------- + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -334,16 +334,16 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -351,22 +351,22 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); - node_name | node_port | success | result ------------+-----------+---------+------------------------------------------------ + node_name | node_port | success | result +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) -- run_command_on_XXX tests SELECT * FROM run_command_on_workers('select 1') 
ORDER BY 2 ASC; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; - nodename | nodeport | success | result ------------+----------+---------+-------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | 0 localhost | 57638 | t | 0 (2 rows) @@ -375,14 +375,14 @@ SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') O SET citus.shard_count TO 5; CREATE TABLE check_placements (key int); SELECT create_distributed_table('check_placements', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_placements('check_placements', 'select 1'); - nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------- + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- localhost | 57637 | 1240000 | t | 1 localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 @@ -398,8 +398,8 @@ SELECT * FROM run_command_on_placements('check_placements', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; SELECT * FROM run_command_on_placements('check_placements', 'select 1'); - nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------- + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 localhost | 57638 | 1240001 | t | 1 @@ -413,17 +413,17 @@ DROP TABLE check_placements CASCADE; -- make sure run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); SELECT create_distributed_table('check_colocated', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 4; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', @@ -435,9 +435,9 @@ SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', @@ -449,15 +449,15 @@ SET citus.shard_replication_factor TO 2; SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); - nodename | nodeport | shardid1 | shardid2 | success | result ------------+----------+----------+----------+---------+-------- + nodename | nodeport | shardid1 | shardid2 | success | result +--------------------------------------------------------------------- localhost | 57637 | 1240005 | 1240019 | t | 1 localhost | 57638 | 1240005 | 1240019 | t | 1 localhost | 57637 | 1240006 | 1240020 | t | 1 @@ -476,14 +476,14 @@ DROP TABLE second_table CASCADE; SET citus.shard_count TO 5; CREATE TABLE check_shards (key int); SELECT create_distributed_table('check_shards', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM run_command_on_shards('check_shards', 'select 1'); - shardid | success | result ----------+---------+-------- + shardid | success | result +--------------------------------------------------------------------- 1240024 | t | 1 1240025 | t | 1 1240026 | t | 1 @@ -494,8 +494,8 @@ SELECT * FROM run_command_on_shards('check_shards', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * FROM run_command_on_shards('check_shards', 'select 1'); NOTICE: some shards do not have active placements - shardid | success | result ----------+---------+-------- + shardid | success | result +--------------------------------------------------------------------- 1240025 | t | 1 1240027 | t | 1 (2 rows) diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 972a0da28..58b3c1d86 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -9,70 +9,70 @@ ERROR: cannot create reference table "test_reference_table" DETAIL: There are no active worker nodes. -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to add a node that is already in the cluster SELECT * FROM master_add_node('localhost', :worker_1_port); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to remove a node (with no placements) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- verify that the node has been deleted SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57637) (1 row) -- try to disable a node with no placements see that node is removed SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT master_disable_node('localhost', :worker_2_port); - master_disable_node ---------------------- - + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57637) (1 row) @@ -80,22 +80,22 @@ SELECT master_get_active_worker_nodes(); SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT * FROM master_activate_node('localhost', :worker_2_port); - master_activate_node ----------------------- + master_activate_node +--------------------------------------------------------------------- 3 (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- see that there are some active placements in the candidate node SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1220001 | 1 | localhost | 57638 1220003 | 1 | localhost | 57638 1220005 | 1 | localhost | 57638 @@ -110,8 +110,8 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes 
--------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -121,15 +121,15 @@ INSERT INTO test_reference_table VALUES (1, '1'); -- try to disable a node with active placements see that node is removed -- observe that a notification is displayed SELECT master_disable_node('localhost', :worker_2_port); -NOTICE: Node localhost:57638 has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back. - master_disable_node ---------------------- - +NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back. + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57637) (1 row) @@ -169,50 +169,50 @@ ERROR: permission denied for function master_update_node SET ROLE node_metadata_user; BEGIN; SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port; - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; - nodename | nodeport | noderole ------------+----------+----------- + nodename | nodeport | noderole +--------------------------------------------------------------------- localhost | 57637 | primary localhost | 57639 | primary localhost | 57640 | secondary @@ -222,15 +222,15 @@ SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; ABORT; \c - postgres - :master_port SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57637) (1 row) -- restore the node for next tests SELECT * FROM master_activate_node('localhost', :worker_2_port); - master_activate_node ----------------------- + master_activate_node +--------------------------------------------------------------------- 3 (1 row) @@ -241,8 +241,8 @@ ERROR: you cannot remove the primary node of a node group which has shard place SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1220001 | 3 | localhost | 57638 1220003 | 3 | localhost | 57638 1220005 | 3 | localhost | 57638 @@ -257,16 +257,16 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes --------------------------------- + master_get_active_worker_nodes +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- clean-up SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -285,78 +285,78 @@ SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group; -- test that you are allowed to remove secondary nodes even if there are placements SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary'); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_remove_node('localhost', 9990); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- clean-up DROP TABLE cluster_management_test; -- check that adding/removing nodes are propagated to nodes with metadata SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ------------+---------- + nodename | nodeport +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport -----------+---------- + nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- check that added nodes are not propagated to nodes without metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport -----------+---------- + nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -364,14 +364,14 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port); - master_remove_node | master_remove_node ---------------------+-------------------- - | + master_remove_node | master_remove_node +--------------------------------------------------------------------- + | (1 row) SELECT count(1) FROM pg_dist_node; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -379,14 +379,14 @@ SELECT count(1) FROM pg_dist_node; SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); - master_add_node | master_add_node ------------------+----------------- + master_add_node | master_add_node +--------------------------------------------------------------------- 11 | 12 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 11 | 9 | localhost | 57637 | default | f | t | primary | default | f | t 12 | 10 | localhost | 57638 | default | f | t | primary | default | f | t (2 rows) @@ -394,116 +394,116 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; -- check that mixed add/remove node commands work fine inside transaction BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport -----------+---------- + nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ------------+---------- + nodename | nodeport +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ------------+---------- + nodename | nodeport +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; - master_remove_node --------------------- - - + master_remove_node +--------------------------------------------------------------------- + + (2 rows) SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- check that a distributed table can be created after adding a node in a transaction SET citus.shard_count TO 4; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) CREATE TABLE temp(col1 text, col2 int); SELECT create_distributed_table('temp', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO temp VALUES ('row1', 1); INSERT INTO temp VALUES ('row2', 2); COMMIT; SELECT col1, col2 FROM temp ORDER BY col1; - col1 | col2 -------+------ + col1 | col2 +--------------------------------------------------------------------- row1 | 1 row2 | 2 (2 rows) @@ -516,8 +516,8 @@ WHERE pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -529,15 +529,15 @@ DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- check that you can't add a primary to a non-default cluster @@ -550,47 +550,47 @@ ERROR: group 14 already has a primary node -- check that you can add secondaries and unavailable nodes to a group SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- add_inactive_node also works with secondaries SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- check that you can add a seconary to a non-default cluster, and activate it, and remove it SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); - master_add_inactive_node --------------------------- + master_add_inactive_node +--------------------------------------------------------------------- 22 (1 row) SELECT master_activate_node('localhost', 9999); - master_activate_node ----------------------- + master_activate_node +--------------------------------------------------------------------- 22 (1 row) SELECT master_disable_node('localhost', 9999); - master_disable_node ---------------------- - + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT master_remove_node('localhost', 9999); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- check that you can't manually add two primaries to a group @@ -614,8 +614,8 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, -- check that you /can/ add a secondary node to a non-default cluster SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 25 (1 row) @@ -627,14 +627,14 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole = 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'overflow' ); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 26 (1 row) SELECT * FROM pg_dist_node WHERE nodeport=8887; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+-----------------------------------------------------------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. 
| f | t (1 row) @@ -642,22 +642,22 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887; -- them in any of the remaining tests -- master_add_secondary_node lets you skip looking up the groupid SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); - master_add_secondary_node ---------------------------- + master_add_secondary_node +--------------------------------------------------------------------- 27 (1 row) SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); - master_add_secondary_node ---------------------------- + master_add_secondary_node +--------------------------------------------------------------------- 28 (1 row) SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); -ERROR: node at "localhost:2000" does not exist +ERROR: node at "localhost:xxxxx" does not exist SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); - master_add_secondary_node ---------------------------- + master_add_secondary_node +--------------------------------------------------------------------- 29 (1 row) @@ -670,43 +670,43 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port); ERROR: there is already another node with the specified hostname and port -- master_update_node moves a node SELECT master_update_node(:worker_1_node, 'somehost', 9000); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t (1 row) -- cleanup SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t (1 row) SET citus.shard_replication_factor TO 1; CREATE TABLE test_dist (x int, y int); SELECT create_distributed_table('test_dist', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- testing behaviour when setting shouldhaveshards to false on partially empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); - 
master_set_node_property --------------------------- - + master_set_node_property +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_dist_colocated (x int, y int); @@ -714,35 +714,35 @@ CREATE TABLE test_dist_non_colocated (x int, y int); CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist_colocated', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('test_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- colocated tables should still be placed on shouldhaveshards false nodes for safety SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 2 57638 | 2 (2 rows) @@ -751,8 +751,8 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -761,8 +761,8 @@ WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -770,8 +770,8 @@ WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) @@ -780,9 +780,9 @@ WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated; -- testing behaviour when setting shouldhaveshards to false on fully empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); - master_set_node_property --------------------------- - + 
master_set_node_property +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_dist (x int, y int); @@ -790,23 +790,23 @@ CREATE TABLE test_dist_colocated (x int, y int); CREATE TABLE test_dist_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('test_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- distributed tables should not be placed on nodes with shouldhaveshards false SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -814,16 +814,16 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); - master_set_node_property --------------------------- - + master_set_node_property +--------------------------------------------------------------------- + (1 row) -- distributed tables should still not be placed on nodes that were switched to @@ -831,8 +831,8 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaves SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -840,22 +840,22 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) SELECT create_distributed_table('test_dist_colocated', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- colocated tables should not be placed on nodedes that were switched to @@ -863,8 +863,8 @@ SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport 
ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -873,8 +873,8 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count -----------+------- + nodeport | count +--------------------------------------------------------------------- 57637 | 2 57638 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_colocated_shard_transfer.out b/src/test/regress/expected/multi_colocated_shard_transfer.out index fab3190bf..4dfc43a84 100644 --- a/src/test/regress/expected/multi_colocated_shard_transfer.out +++ b/src/test/regress/expected/multi_colocated_shard_transfer.out @@ -10,15 +10,15 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_por -- test repairing colocated shards -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -39,22 +39,22 @@ ORDER BY s.shardid, sp.nodeport; -- repair colocated shards SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 1 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -76,15 +76,15 @@ ORDER BY s.shardid, sp.nodeport; -- test repairing NOT colocated shard -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate 
----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 3 1300017 | table5_groupx | 57637 | 0 | 1 @@ -97,22 +97,22 @@ ORDER BY s.shardid, sp.nodeport; -- repair NOT colocated shard SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 1 1300017 | table5_groupx | 57637 | 0 | 1 @@ -126,15 +126,15 @@ ORDER BY s.shardid, sp.nodeport; -- test repairing shard in append distributed table -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 3 1300021 | table6_append | 57637 | 0 | 1 @@ -143,22 +143,22 @@ ORDER BY s.shardid, sp.nodeport; -- repair shard in append distributed table SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 1 1300021 | table6_append | 57637 | 0 | 1 @@ -170,15 +170,15 @@ ORDER BY s.shardid, sp.nodeport; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1300000; -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, 
p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -202,15 +202,15 @@ SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localh ERROR: source placement must be in finalized state -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate -FROM +FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; - shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ + shardid | logicalrelid | nodeport | colocationid | shardstate +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out index e4c03ca7b..7a5120354 100644 --- a/src/test/regress/expected/multi_colocation_utils.out +++ b/src/test/regress/expected/multi_colocation_utils.out @@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4; CREATE SEQUENCE colocation_test_seq MINVALUE 1000 NO CYCLE; -/* a very simple UDF that only sets the colocation ids the same +/* a very simple UDF that only sets the colocation ids the same * DO NOT USE THIS FUNCTION IN PRODUCTION. It manually sets colocationid column of * pg_dist_partition and it does not check anything about pyshical state about shards. 
*/ @@ -25,7 +25,7 @@ BEGIN FROM pg_dist_partition p1, pg_dist_partition p2 WHERE p2.logicalrelid = source_table AND - (p1.logicalrelid = source_table OR + (p1.logicalrelid = source_table OR (p1.colocationId = p2.colocationId AND p1.colocationId != 0))) UNION (SELECT target_table) @@ -58,281 +58,281 @@ CREATE FUNCTION find_shard_interval_index(bigint) -- create distributed table observe shard pruning CREATE TABLE table1_group1 ( id int ); SELECT master_create_distributed_table('table1_group1', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('table1_group1', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_group1 ( id int ); SELECT master_create_distributed_table('table2_group1', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('table2_group1', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) CREATE TABLE table3_group2 ( id int ); SELECT master_create_distributed_table('table3_group2', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('table3_group2', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) CREATE TABLE table4_group2 ( id int ); SELECT master_create_distributed_table('table4_group2', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('table4_group2', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) CREATE TABLE table5_groupX ( id int ); SELECT master_create_distributed_table('table5_groupX', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('table5_groupX', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) CREATE TABLE table6_append ( id int ); SELECT master_create_distributed_table('table6_append', 'id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('table6_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 
1300020 (1 row) SELECT master_create_empty_shard('table6_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1300021 (1 row) -- make table1_group1 and table2_group1 co-located manually SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1'); - colocation_test_colocate_tables ---------------------------------- + colocation_test_colocate_tables +--------------------------------------------------------------------- t (1 row) -- check co-location id SELECT get_table_colocation_id('table1_group1'); - get_table_colocation_id -------------------------- + get_table_colocation_id +--------------------------------------------------------------------- 1000 (1 row) SELECT get_table_colocation_id('table5_groupX'); - get_table_colocation_id -------------------------- + get_table_colocation_id +--------------------------------------------------------------------- 0 (1 row) SELECT get_table_colocation_id('table6_append'); - get_table_colocation_id -------------------------- + get_table_colocation_id +--------------------------------------------------------------------- 0 (1 row) -- check self table co-location SELECT tables_colocated('table1_group1', 'table1_group1'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- t (1 row) SELECT tables_colocated('table5_groupX', 'table5_groupX'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- t (1 row) SELECT tables_colocated('table6_append', 'table6_append'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- t (1 row) -- check table co-location with same co-location group SELECT tables_colocated('table1_group1', 'table2_group1'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- t (1 row) -- check table co-location with different co-location group SELECT tables_colocated('table1_group1', 'table3_group2'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- f (1 row) -- check table co-location with invalid co-location group SELECT tables_colocated('table1_group1', 'table5_groupX'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- f (1 row) SELECT tables_colocated('table1_group1', 'table6_append'); - tables_colocated ------------------- + tables_colocated +--------------------------------------------------------------------- f (1 row) -- check self shard co-location SELECT shards_colocated(1300000, 1300000); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- t (1 row) SELECT shards_colocated(1300016, 1300016); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- t (1 row) SELECT shards_colocated(1300020, 1300020); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- t (1 row) -- check shard co-location with same co-location group SELECT shards_colocated(1300000, 1300004); - shards_colocated ------------------- + shards_colocated 
+--------------------------------------------------------------------- t (1 row) -- check shard co-location with same table different co-location group SELECT shards_colocated(1300000, 1300001); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- f (1 row) -- check shard co-location with different co-location group SELECT shards_colocated(1300000, 1300005); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- f (1 row) -- check shard co-location with invalid co-location group SELECT shards_colocated(1300000, 1300016); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- f (1 row) SELECT shards_colocated(1300000, 1300020); - shards_colocated ------------------- + shards_colocated +--------------------------------------------------------------------- f (1 row) -- check co-located table list SELECT UNNEST(get_colocated_table_array('table1_group1'))::regclass ORDER BY 1; - unnest ---------------- + unnest +--------------------------------------------------------------------- table1_group1 table2_group1 (2 rows) SELECT UNNEST(get_colocated_table_array('table5_groupX'))::regclass ORDER BY 1; - unnest ---------------- + unnest +--------------------------------------------------------------------- table5_groupx (1 row) SELECT UNNEST(get_colocated_table_array('table6_append'))::regclass ORDER BY 1; - unnest ---------------- + unnest +--------------------------------------------------------------------- table6_append (1 row) -- check co-located shard list SELECT UNNEST(get_colocated_shard_array(1300000))::regclass ORDER BY 1; - unnest ---------- + unnest +--------------------------------------------------------------------- 1300000 1300004 (2 rows) SELECT UNNEST(get_colocated_shard_array(1300016))::regclass ORDER BY 1; - unnest ---------- + unnest +--------------------------------------------------------------------- 1300016 (1 row) SELECT UNNEST(get_colocated_shard_array(1300020))::regclass ORDER BY 1; - unnest ---------- + unnest +--------------------------------------------------------------------- 1300020 (1 row) -- check FindShardIntervalIndex function SELECT find_shard_interval_index(1300000); - find_shard_interval_index ---------------------------- + find_shard_interval_index +--------------------------------------------------------------------- 0 (1 row) SELECT find_shard_interval_index(1300001); - find_shard_interval_index ---------------------------- + find_shard_interval_index +--------------------------------------------------------------------- 1 (1 row) SELECT find_shard_interval_index(1300002); - find_shard_interval_index ---------------------------- + find_shard_interval_index +--------------------------------------------------------------------- 2 (1 row) SELECT find_shard_interval_index(1300003); - find_shard_interval_index ---------------------------- + find_shard_interval_index +--------------------------------------------------------------------- 3 (1 row) SELECT find_shard_interval_index(1300016); - find_shard_interval_index ---------------------------- + find_shard_interval_index +--------------------------------------------------------------------- 0 (1 row) @@ -340,32 +340,32 @@ SELECT find_shard_interval_index(1300016); SET citus.shard_count = 2; CREATE TABLE table1_groupA ( id int ); SELECT create_distributed_table('table1_groupA', 'id'); - 
create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupA ( id int ); SELECT create_distributed_table('table2_groupA', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- change shard replication factor SET citus.shard_replication_factor = 1; CREATE TABLE table1_groupB ( id int ); SELECT create_distributed_table('table1_groupB', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupB ( id int ); SELECT create_distributed_table('table2_groupB', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table1_groupB'::regclass; @@ -375,64 +375,64 @@ SET citus.shard_replication_factor to DEFAULT; -- change partition column type CREATE TABLE table1_groupC ( id text ); SELECT create_distributed_table('table1_groupC', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupC ( id text ); SELECT create_distributed_table('table2_groupC', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- change shard count SET citus.shard_count = 8; CREATE TABLE table1_groupD ( id int ); SELECT create_distributed_table('table1_groupD', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupD ( id int ); SELECT create_distributed_table('table2_groupD', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- try other distribution methods CREATE TABLE table_append ( id int ); SELECT create_distributed_table('table_append', 'id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_range ( id int ); SELECT create_distributed_table('table_range', 'id', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test foreign table creation CREATE FOREIGN TABLE table3_groupD ( id int ) SERVER fake_fdw_server; SELECT create_distributed_table('table3_groupD', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- check metadata -SELECT * FROM pg_dist_colocation - WHERE colocationid >= 1 AND colocationid < 1000 +SELECT * FROM pg_dist_colocation + WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | 
distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -441,10 +441,10 @@ SELECT * FROM pg_dist_colocation (5 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition - WHERE colocationid >= 1 AND colocationid < 1000 + WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY logicalrelid; - logicalrelid | colocationid ----------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- table1_groupa | 4 table2_groupa | 4 table1_groupb | 5 @@ -459,16 +459,16 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition -- check effects of dropping tables DROP TABLE table1_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 4 | 2 | 2 | 23 | 0 (1 row) -- dropping all tables in a colocation group also deletes the colocation group DROP TABLE table2_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 4 | 2 | 2 | 23 | 0 (1 row) @@ -476,87 +476,87 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4; SET citus.shard_count = 2; CREATE TABLE table1_groupE ( id int ); SELECT create_distributed_table('table1_groupE', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupE ( id int ); SELECT create_distributed_table('table2_groupE', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test different table DDL CREATE TABLE table3_groupE ( dummy_column text, id int ); SELECT create_distributed_table('table3_groupE', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test different schema CREATE SCHEMA schema_colocation; CREATE TABLE schema_colocation.table4_groupE ( id int ); SELECT create_distributed_table('schema_colocation.table4_groupE', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test colocate_with option CREATE TABLE table1_group_none_1 ( id int ); SELECT create_distributed_table('table1_group_none_1', 'id', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_group_none_1 ( id int ); SELECT create_distributed_table('table2_group_none_1', 'id', colocate_with => 'table1_group_none_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table1_group_none_2 ( id int ); SELECT create_distributed_table('table1_group_none_2', 'id', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table4_groupE ( id int ); SELECT create_distributed_table('table4_groupE', 'id', colocate_with => 'default'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 3; -- check that this new configuration does not have a default group CREATE TABLE table1_group_none_3 ( id int ); SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'NONE'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- a new table does not use a non-default group CREATE TABLE table1_group_default ( id int ); SELECT create_distributed_table('table1_group_default', 'id', colocate_with => 'DEFAULT'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -568,8 +568,8 @@ SELECT * FROM pg_dist_colocation SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid ----------------------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- table1_groupe | 4 table2_groupe | 4 table3_groupe | 4 @@ -605,9 +605,9 @@ ERROR: relation "no_table" does not exist SELECT create_distributed_table('table_failing', 'id', colocate_with => ''); ERROR: invalid name syntax SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- check with different distribution column types @@ -618,40 +618,40 @@ DETAIL: Distribution column types don't match for table1_groupe and table_bigin -- check worker table schemas \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300062'::regclass; - Column | Type | Modifiers ---------------+---------+----------- - dummy_column | text | - id | integer | + Column | Type 
| Modifiers +--------------------------------------------------------------------- + dummy_column | text | + id | integer | (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_colocation.table4_groupE_1300064'::regclass; - Column | Type | Modifiers ---------+---------+----------- - id | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | (1 row) \c - - - :master_port SET citus.next_shard_id TO 1300080; CREATE TABLE table1_groupF ( id int ); SELECT create_reference_table('table1_groupF'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_groupF ( id int ); SELECT create_reference_table('table2_groupF'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -661,7 +661,7 @@ SELECT * FROM pg_dist_colocation (6 rows) -- cross check with internal colocation API -SELECT +SELECT p1.logicalrelid::regclass AS table1, p2.logicalrelid::regclass AS table2, tables_colocated(p1.logicalrelid , p2.logicalrelid) AS colocated @@ -676,8 +676,8 @@ WHERE ORDER BY table1, table2; - table1 | table2 | colocated ----------------------------------+---------------------------------+----------- + table1 | table2 | colocated +--------------------------------------------------------------------- table1_group1 | table2_group1 | t table1_groupb | table2_groupb | t table1_groupc | table2_groupc | t @@ -717,8 +717,8 @@ ORDER BY shardmaxvalue::integer, shardid, nodeport; - logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+----------+---------------+--------------- + logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- table1_groupb | 1300026 | t | 57637 | -2147483648 | -1 table1_groupb | 1300027 | t | 57638 | 0 | 2147483647 table2_groupb | 1300028 | t | 57637 | -2147483648 | -1 @@ -823,10 +823,10 @@ ORDER BY table1_group_default | 1300078 | t | 57638 | -715827883 | 715827881 table1_group_default | 1300079 | t | 57637 | 715827882 | 2147483647 table1_group_default | 1300079 | t | 57638 | 715827882 | 2147483647 - table1_groupf | 1300080 | t | 57637 | | - table1_groupf | 1300080 | t | 57638 | | - table2_groupf | 1300081 | t | 57637 | | - table2_groupf | 1300081 | t | 57638 | | + table1_groupf | 1300080 | t | 57637 | | + table1_groupf | 1300080 | t | 57638 | | + table2_groupf | 1300081 | t | 57637 | | + table2_groupf | 1300081 | t | 57638 | | (108 rows) -- reset colocation ids to test mark_tables_colocated @@ -839,15 +839,15 @@ UPDATE pg_dist_partition SET colocationid = 0 SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER 
BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid ---------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- (0 rows) -- first check failing cases @@ -859,7 +859,7 @@ ERROR: cannot colocate tables table1_groupb and table1_groupd DETAIL: Shard counts don't match for table1_groupb and table1_groupd. SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupE']); ERROR: cannot colocate tables table1_groupb and table1_groupe -DETAIL: Shard 1300026 of table1_groupb and shard 1300058 of table1_groupe have different number of shard placements. +DETAIL: Shard 1300026 of table1_groupb and shard xxxxx of table1_groupe have different number of shard placements. SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupF']); ERROR: cannot colocate tables table1_groupb and table1_groupf DETAIL: Replication models don't match for table1_groupb and table1_groupf. @@ -870,76 +870,76 @@ DETAIL: Shard counts don't match for table1_groupb and table1_groupd. SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid ---------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- (0 rows) -- check successfully cololated tables SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SELECT mark_tables_colocated('table1_groupC', ARRAY['table2_groupC']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SELECT mark_tables_colocated('table1_groupD', ARRAY['table2_groupD']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SELECT mark_tables_colocated('table1_groupE', ARRAY['table2_groupE', 'table3_groupE']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SELECT mark_tables_colocated('table1_groupF', ARRAY['table2_groupF']); - mark_tables_colocated ------------------------ - + mark_tables_colocated 
+--------------------------------------------------------------------- + (1 row) -- check to colocate with itself SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupB']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 2; CREATE TABLE table1_group_none ( id int ); SELECT create_distributed_table('table1_group_none', 'id', colocate_with => 'NONE'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table2_group_none ( id int ); SELECT create_distributed_table('table2_group_none', 'id', colocate_with => 'NONE'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- check metadata to see colocation groups are created successfully -SELECT * FROM pg_dist_colocation - WHERE colocationid >= 1 AND colocationid < 1000 +SELECT * FROM pg_dist_colocation + WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 2 | 2 | 1 | 23 | 0 3 | 2 | 2 | 25 | 100 4 | 8 | 2 | 23 | 0 @@ -949,8 +949,8 @@ SELECT * FROM pg_dist_colocation SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid --------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 @@ -966,24 +966,24 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition -- move the all tables in colocation group 5 to colocation group 7 SELECT mark_tables_colocated('table1_group_none', ARRAY['table1_groupE', 'table2_groupE', 'table3_groupE']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) -- move a table with a colocation id which is already not in pg_dist_colocation SELECT mark_tables_colocated('table1_group_none', ARRAY['table2_group_none']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) -- check metadata to see that unused colocation group is deleted SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 2 | 2 | 1 | 23 | 0 3 | 2 | 2 | 25 | 100 4 | 8 | 2 | 23 | 0 @@ -992,8 +992,8 @@ SELECT * FROM pg_dist_colocation SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid 
< 1000 ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid --------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 @@ -1010,9 +1010,9 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition -- try to colocate different replication models CREATE TABLE table1_groupG ( id int ); SELECT create_distributed_table('table1_groupG', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- update replication model @@ -1024,9 +1024,9 @@ DETAIL: Replication models don't match for table1_groupg and table2_groupg. CREATE TABLE table2_groupG ( id int ); ERROR: relation "table2_groupg" already exists SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT mark_tables_colocated('table1_groupG', ARRAY['table2_groupG']); diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out index 4321ea05f..bb37e3ea3 100644 --- a/src/test/regress/expected/multi_complex_expressions.out +++ b/src/test/regress/expected/multi_complex_expressions.out @@ -3,45 +3,45 @@ -- -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; - ?column? ------------------------- + ?column? +--------------------------------------------------------------------- 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; - ?column? ------------------------ + ?column? +--------------------------------------------------------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; - ?column? ------------------------ + ?column? +--------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; - average ---------------------- + average +--------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; - average_times_hundred ------------------------ + average_times_hundred +--------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; - average_times_ten ----------------------- + average_times_ten +--------------------------------------------------------------------- 254.4625000000000000 (1 row) -SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem +SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - l_quantity | count_quantity -------------+---------------- + l_quantity | count_quantity +--------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 45.00 | 2180 @@ -97,147 +97,147 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem -- Check that we can handle complex select clause expressions. 
SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; - count -------- + count +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; - count -------- + count +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count -------- + count +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; - count -------- + count +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; - count -------- + count +--------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; - count -------- + count +--------------------------------------------------------------------- 11423 (1 row) -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; - count -------- + count +--------------------------------------------------------------------- 260 (1 row) -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); - count -------- + count +--------------------------------------------------------------------- 7948 (1 row) -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); - count -------- + count +--------------------------------------------------------------------- 9122 (1 row) -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); - count -------- + count +--------------------------------------------------------------------- 9122 (1 row) -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) -- constant expressions can be pushed down SELECT count(*) FROM lineitem 
WHERE 0 != 0; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- distinct expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; - count -------- + count +--------------------------------------------------------------------- 11999 (1 row) -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); - count -------- + count +--------------------------------------------------------------------- 11882 (1 row) @@ -251,8 +251,8 @@ SELECT count(*) FROM lineitem isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); - count -------- + count +--------------------------------------------------------------------- 137 (1 row) @@ -263,8 +263,8 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber --------------- + l_linenumber +--------------------------------------------------------------------- 1 (1 row) @@ -276,8 +276,8 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; - total_discount | count | sum | l_discount -----------------+-------+-------+------------ + total_discount | count | sum | l_discount +--------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 90.64 | 1133 | 45.94 | 0.08 @@ -299,8 +299,8 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber --------------- + l_linenumber +--------------------------------------------------------------------- 2 (1 row) @@ -314,30 +314,30 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem ORDER BY l_receiptdate LIMIT 1; - max | min | l_receiptdate ------+------+--------------- + max | min | l_receiptdate +--------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) -- Check that we can handle implicit and explicit join clause definitions. 
SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) @@ -347,8 +347,8 @@ ERROR: complex joins are only supported when all distributed tables are joined -- Check that we can issue limit/offset queries -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq ORDER BY 1 LIMIT 5; - o_custkey ------------ + o_custkey +--------------------------------------------------------------------- 35 37 38 @@ -358,8 +358,8 @@ SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custke -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq ORDER BY 1 LIMIT 5; - o_orderkey ------------- + o_orderkey +--------------------------------------------------------------------- 69 70 71 @@ -369,8 +369,8 @@ SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq O -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; - o_orderkey ------------- + o_orderkey +--------------------------------------------------------------------- 69 70 71 @@ -385,19 +385,19 @@ SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; -SELECT +SELECT customer_keys.o_custkey, - SUM(order_count) AS total_order_count -FROM - (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count + SUM(order_count) AS total_order_count +FROM + (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys -GROUP BY +GROUP BY customer_keys.o_custkey -ORDER BY +ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; - o_custkey | total_order_count ------------+------------------- + o_custkey | total_order_count +--------------------------------------------------------------------- 1466 | 1 1465 | 2 1463 | 4 @@ -429,8 +429,8 @@ CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; - o_custkey ------------ + o_custkey +--------------------------------------------------------------------- 1498 1498 1499 @@ -439,19 +439,19 @@ SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; (5 rows) -- LIMIT/OFFSET with Joins -SELECT +SELECT li.l_partkey, o.o_custkey, li.l_quantity -FROM +FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey -WHERE +WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 - l_partkey | o_custkey | l_quantity ------------+-----------+------------ + l_partkey | o_custkey | l_quantity +--------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 699 | 1255 | 
50.00 @@ -478,8 +478,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+-----------+---------- + l_orderkey | sum | sum | count | count | max | max +--------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 2567 | 412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02 @@ -505,8 +505,8 @@ SELECT HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+----------+---------- + l_orderkey | sum | sum | count | count | max | max +--------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 diff --git a/src/test/regress/expected/multi_complex_expressions_0.out b/src/test/regress/expected/multi_complex_expressions_0.out index eaf036a9f..9a2418d41 100644 --- a/src/test/regress/expected/multi_complex_expressions_0.out +++ b/src/test/regress/expected/multi_complex_expressions_0.out @@ -3,45 +3,45 @@ -- -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; - ?column? ------------------------- + ?column? +--------------------------------------------------------------------- 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; - ?column? ------------------------ + ?column? +--------------------------------------------------------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; - ?column? ------------------------ + ?column? +--------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; - average ---------------------- + average +--------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; - average_times_hundred ------------------------ + average_times_hundred +--------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; - average_times_ten ----------------------- + average_times_ten +--------------------------------------------------------------------- 254.4625000000000000 (1 row) -SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem +SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - l_quantity | count_quantity -------------+---------------- + l_quantity | count_quantity +--------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 45.00 | 2180 @@ -97,147 +97,147 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem -- Check that we can handle complex select clause expressions. 
SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; - count -------- + count +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; - count -------- + count +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count -------- + count +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; - count -------- + count +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; - count -------- + count +--------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; - count -------- + count +--------------------------------------------------------------------- 11423 (1 row) -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; - count -------- + count +--------------------------------------------------------------------- 260 (1 row) -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); - count -------- + count +--------------------------------------------------------------------- 7948 (1 row) -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); - count -------- + count +--------------------------------------------------------------------- 9122 (1 row) -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); - count -------- + count +--------------------------------------------------------------------- 9122 (1 row) -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) -- constant expressions can be pushed down SELECT count(*) FROM lineitem 
WHERE 0 != 0; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- distinct expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; - count -------- + count +--------------------------------------------------------------------- 11999 (1 row) -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); - count -------- + count +--------------------------------------------------------------------- 11882 (1 row) @@ -251,8 +251,8 @@ SELECT count(*) FROM lineitem isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); - count -------- + count +--------------------------------------------------------------------- 137 (1 row) @@ -263,8 +263,8 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber --------------- + l_linenumber +--------------------------------------------------------------------- 1 (1 row) @@ -276,8 +276,8 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; - total_discount | count | sum | l_discount -----------------+-------+-------+------------ + total_discount | count | sum | l_discount +--------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 90.64 | 1133 | 45.94 | 0.08 @@ -299,8 +299,8 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber --------------- + l_linenumber +--------------------------------------------------------------------- 2 (1 row) @@ -314,30 +314,30 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem ORDER BY l_receiptdate LIMIT 1; - max | min | l_receiptdate ------+------+--------------- + max | min | l_receiptdate +--------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) -- Check that we can handle implicit and explicit join clause definitions. 
SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; - count -------- + count +--------------------------------------------------------------------- 951 (1 row) @@ -355,8 +355,8 @@ ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with offset are not supported yet -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; - o_orderkey ------------- + o_orderkey +--------------------------------------------------------------------- 69 70 71 @@ -371,19 +371,19 @@ SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; -SELECT +SELECT customer_keys.o_custkey, - SUM(order_count) AS total_order_count -FROM - (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count + SUM(order_count) AS total_order_count +FROM + (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys -GROUP BY +GROUP BY customer_keys.o_custkey -ORDER BY +ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; - o_custkey | total_order_count ------------+------------------- + o_custkey | total_order_count +--------------------------------------------------------------------- 1466 | 1 1465 | 2 1463 | 4 @@ -415,8 +415,8 @@ CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; - o_custkey ------------ + o_custkey +--------------------------------------------------------------------- 1498 1498 1499 @@ -425,19 +425,19 @@ SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; (5 rows) -- LIMIT/OFFSET with Joins -SELECT +SELECT li.l_partkey, o.o_custkey, li.l_quantity -FROM +FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey -WHERE +WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 - l_partkey | o_custkey | l_quantity ------------+-----------+------------ + l_partkey | o_custkey | l_quantity +--------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 699 | 1255 | 50.00 @@ -464,8 +464,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+-----------+---------- + l_orderkey | sum | sum | count | count | max | max +--------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 2567 | 412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02 @@ -491,8 +491,8 @@ SELECT HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+----------+---------- + l_orderkey | sum | sum | count | count | max | max 
+--------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 diff --git a/src/test/regress/expected/multi_count_type_conversion.out b/src/test/regress/expected/multi_count_type_conversion.out index a258ebef2..b2126bef7 100644 --- a/src/test/regress/expected/multi_count_type_conversion.out +++ b/src/test/regress/expected/multi_count_type_conversion.out @@ -10,8 +10,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity DESC; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -48,8 +48,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity ASC; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index ed177108f..6604a247f 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -51,8 +51,6 @@ ERROR: column "bad_column" of relation "table_to_distribute" does not exist -- use unrecognized partition type SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized'); ERROR: invalid input value for enum citus.distribution_type: "unrecognized" -LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni... - ^ -- use a partition column of a type lacking any default operator class SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash'); ERROR: data type json has no default operator class for specified partition method @@ -63,15 +61,15 @@ ERROR: could not identify a hash function for type dummy_type DETAIL: Partition column types must have a hash function defined to use hash partitioning. -- distribute table and inspect side effects SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; - partmethod | partkey -------------+-------------------------------------------------------------------------------------------------------------------------- + partmethod | partkey +--------------------------------------------------------------------- h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) @@ -87,16 +85,16 @@ ERROR: replication_factor (3) exceeds number of worker nodes (2) HINT: Add more worker nodes or try again with a lower replication factor. 
-- finally, create shards and inspect metadata SELECT master_create_worker_shards('table_to_distribute', 16, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; - shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- + shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- t | -2147483648 | -1879048193 t | -1879048192 | -1610612737 t | -1610612736 | -1342177281 @@ -121,14 +119,14 @@ SELECT count(*) AS shard_count, FROM pg_dist_shard WHERE logicalrelid='table_to_distribute'::regclass GROUP BY shard_size; - shard_count | shard_size --------------+------------ + shard_count | shard_size +--------------------------------------------------------------------- 16 | 268435455 (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -137,17 +135,17 @@ SELECT master_create_worker_shards('table_to_distribute', 16, 1); ERROR: table "table_to_distribute" has already had shards created for it -- test list sorting SELECT sort_names('sumedh', 'jason', 'ozgun'); - sort_names ------------- + sort_names +--------------------------------------------------------------------- jason + ozgun + sumedh + - + (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'throwaway%' AND relkind = 'r'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -162,16 +160,16 @@ SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; - shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- + shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- f | -2147483648 | -1879048193 f | -1879048192 | -1610612737 f | -1610612736 | -1342177281 @@ -198,9 +196,9 @@ CREATE TABLE weird_shard_count ); SET citus.shard_count TO 7; SELECT create_distributed_table('weird_shard_count', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Citus ensures all shards are roughly the same size @@ -208,8 +206,8 @@ SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; - shard_size ------------- + shard_size +--------------------------------------------------------------------- 613566755 613566755 613566755 diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 
431f190aa..7132d4e51 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -29,9 +29,9 @@ SELECT create_distributed_table('lineitem', 'l_orderkey', 'append'); WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX lineitem_time_index ON lineitem (l_shipdate); @@ -50,9 +50,9 @@ SELECT create_distributed_table('orders', 'o_orderkey', 'append'); WARNING: table "orders" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE orders_reference ( @@ -67,9 +67,9 @@ CREATE TABLE orders_reference ( o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_reference_table('orders_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE customer ( @@ -82,9 +82,9 @@ CREATE TABLE customer ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_reference_table('customer'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE customer_append ( @@ -97,9 +97,9 @@ CREATE TABLE customer_append ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE nation ( @@ -108,9 +108,9 @@ CREATE TABLE nation ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE part ( @@ -124,9 +124,9 @@ CREATE TABLE part ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_reference_table('part'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE part_append ( @@ -140,9 +140,9 @@ CREATE TABLE part_append ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_distributed_table('part_append', 'p_partkey', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE supplier @@ -156,12 +156,12 @@ CREATE TABLE supplier s_comment varchar(101) not null ); SELECT create_reference_table('supplier'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) --- create a single 
shard supplier table which is not +-- create a single shard supplier table which is not -- a reference table CREATE TABLE supplier_single_shard ( @@ -174,9 +174,9 @@ CREATE TABLE supplier_single_shard s_comment varchar(101) not null ); SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_table_test (col1 int, col2 text); @@ -190,14 +190,14 @@ HINT: Try again after reducing "citus.shard_replication_factor" to one or setti -- ok, so now actually create the one-off MX table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('mx_table_test', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- s (1 row) @@ -208,14 +208,14 @@ SELECT master_create_distributed_table('s_table', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -227,7 +227,7 @@ DETAIL: The table s_table is marked as streaming replicated and the shard repli HINT: Use replication factor 1. DROP TABLE s_table; RESET citus.replication_model; --- Show that create_distributed_table with append and range distributions ignore +-- Show that create_distributed_table with append and range distributions ignore -- citus.replication_model GUC SET citus.shard_replication_factor TO 2; SET citus.replication_model TO streaming; @@ -235,14 +235,14 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -251,14 +251,14 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -270,14 +270,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -287,14 +287,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -304,14 +304,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -322,14 +322,14 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -338,14 +338,14 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -355,14 +355,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -372,14 +372,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -389,14 +389,14 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- c (1 row) @@ -423,14 +423,14 @@ HINT: Empty your table before distributing it. -- create_distributed_table creates shards and copies data into the distributed table SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM data_load_test ORDER BY col1; - col1 | col2 | col3 -------+-------+------ + col1 | col2 | col3 +--------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 (2 rows) @@ -439,40 +439,40 @@ DROP TABLE data_load_test; -- test queries on distributed tables with no shards CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 -------+------ + col1 | col2 +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 -------+------ + col1 | col2 +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 -------+------ + col1 | col2 +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; @@ -482,16 +482,16 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO data_load_test VALUES (243, 'world'); END; SELECT * FROM data_load_test ORDER BY col1; - col1 | col2 | col3 -------+-------+------ + col1 | col2 | col3 +--------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 (2 rows) @@ -503,25 +503,25 @@ CREATE TABLE data_load_test1 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test1 VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test1', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE data_load_test2 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test2 VALUES (132, 'world'); SELECT create_distributed_table('data_load_test2', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT a.col2 ||' '|| b.col2 FROM data_load_test1 a JOIN data_load_test2 b USING (col1) WHERE col1 = 132; - ?column? -------------- + ?column? 
+--------------------------------------------------------------------- hello world (1 row) @@ -530,8 +530,8 @@ END; -- There should be no table on the worker node \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%'; - relname ---------- + relname +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -541,9 +541,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX data_load_test_idx ON data_load_test (col2); @@ -555,9 +555,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE data_load_test; @@ -568,9 +568,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO data_load_test VALUES (243, 'world'); @@ -583,23 +583,23 @@ INSERT INTO data_load_test VALUES (243, 'world', 'hello'); ALTER TABLE data_load_test DROP COLUMN col1; SELECT create_distributed_table('data_load_test', 'col3'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM data_load_test ORDER BY col2; - col2 | col3 | CoL4") --------+-------+-------- - hello | world | - world | hello | + col2 | col3 | CoL4") +--------------------------------------------------------------------- + hello | world | + world | hello | (2 rows) -- make sure the tuple went to the right shard SELECT * FROM data_load_test WHERE col3 = 'world'; - col2 | col3 | CoL4") --------+-------+-------- - hello | world | + col2 | col3 | CoL4") +--------------------------------------------------------------------- + hello | world | (1 row) DROP TABLE data_load_test; @@ -607,16 +607,16 @@ SET citus.shard_replication_factor TO default; SET citus.shard_count to 4; CREATE TABLE lineitem_hash_part (like lineitem); SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE orders_hash_part (like orders); SELECT create_distributed_table('orders_hash_part', 'o_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE UNLOGGED TABLE unlogged_table @@ -625,22 +625,22 @@ CREATE UNLOGGED TABLE unlogged_table value text ); SELECT create_distributed_table('unlogged_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM master_get_table_ddl_events('unlogged_table'); - master_get_table_ddl_events --------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE UNLOGGED TABLE public.unlogged_table (key text, value text) ALTER TABLE public.unlogged_table OWNER TO postgres (2 rows) \c - - - :worker_1_port SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; - relpersistence ----------------- + relpersistence +--------------------------------------------------------------------- u u u @@ -652,22 +652,22 @@ SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers ---------+------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- (0 rows) \c - - - :master_port --- Insert 3 rows to make sure that copy after shard creation touches the same --- worker node twice. +-- Insert 3 rows to make sure that copy after shard creation touches the same +-- worker node twice. 
BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); INSERT INTO rollback_table VALUES(1, 'Name_1'); @@ -675,35 +675,35 @@ INSERT INTO rollback_table VALUES(2, 'Name_2'); INSERT INTO rollback_table VALUES(3, 'Name_3'); SELECT create_distributed_table('rollback_table','id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers ---------+------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- (0 rows) \c - - - :master_port BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy rollback_table from stdin delimiter ',' CREATE INDEX rollback_index ON rollback_table(id); COMMIT; --- Check the table is created +-- Check the table is created SELECT count(*) FROM rollback_table; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -711,9 +711,9 @@ DROP TABLE rollback_table; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy rollback_table from stdin delimiter ',' @@ -721,24 +721,24 @@ ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers ---------+------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- (0 rows) \c - - - :master_port BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE tt2(id int); SELECT create_distributed_table('tt2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tt1 VALUES(1); @@ -747,33 +747,33 @@ COMMIT; -- Table should exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass; - Column | Type | Modifiers ---------+---------+----------- - id | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass; - Column | Type | Modifiers ---------+---------+----------- - id | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | (1 row) \c - - - :master_port DROP TABLE tt1; DROP TABLE tt2; --- It is known that 
creating a table with master_create_empty_shard is not +-- It is known that creating a table with master_create_empty_shard is not -- transactional, so table stay remaining on the worker node after the rollback BEGIN; CREATE TABLE append_tt1(id int); SELECT create_distributed_table('append_tt1','id','append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('append_tt1'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 360077 (1 row) @@ -781,17 +781,17 @@ ROLLBACK; -- Table exists on the worker node. \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass; - Column | Type | Modifiers ---------+---------+----------- - id | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | (1 row) \c - - - :master_port -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%'); - Column | Type | Modifiers ---------+------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -802,15 +802,15 @@ CREATE TABLE tt1(id int); INSERT INTO tt1 VALUES(1); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tt1 VALUES(2); SELECT * FROM tt1 WHERE id = 1; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) @@ -818,9 +818,9 @@ COMMIT; -- Placements should be created on the worker \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass; - Column | Type | Modifiers ---------+---------+----------- - id | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | (1 row) \c - - - :master_port @@ -828,9 +828,9 @@ DROP TABLE tt1; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE tt1; @@ -838,8 +838,8 @@ COMMIT; -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%'); - Column | Type | Modifiers ---------+------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -848,9 +848,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid f -- in this order of execution CREATE TABLE sample_table(id int); SELECT create_distributed_table('sample_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -858,24 +858,24 @@ CREATE TABLE stage_table (LIKE 
sample_table); \COPY stage_table FROM stdin; -- Note that this operation is a local copy SELECT create_distributed_table('stage_table', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO sample_table SELECT * FROM stage_table; DROP TABLE stage_table; SELECT * FROM sample_table WHERE id = 3; - id ----- + id +--------------------------------------------------------------------- 3 (1 row) COMMIT; --- Show that rows of sample_table are updated +-- Show that rows of sample_table are updated SELECT count(*) FROM sample_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -885,37 +885,37 @@ DROP table sample_table; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY tt1 from stdin; CREATE TABLE tt2(like tt1); SELECT create_distributed_table('tt2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY tt2 from stdin; INSERT INTO tt1 SELECT * FROM tt2; SELECT * FROM tt1 WHERE id = 3; - id ----- + id +--------------------------------------------------------------------- 3 (1 row) SELECT * FROM tt2 WHERE id = 6; - id ----- + id +--------------------------------------------------------------------- 6 (1 row) END; SELECT count(*) FROM tt1; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -929,18 +929,18 @@ CREATE TABLE sc.ref(a int); insert into sc.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc.ref'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE sc.hash(a int); insert into sc.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc.hash', 'a'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -951,18 +951,18 @@ CREATE TABLE sc2.hash(a int); insert into sc2.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc2.hash', 'a'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE sc2.ref(a int); insert into sc2.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc2.ref'); NOTICE: Copying data from local table... 
- create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -976,15 +976,15 @@ CREATE TABLE sc3.alter_replica_table ); ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey; SELECT create_distributed_table('sc3.alter_replica_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1001,15 +1001,15 @@ SET search_path = 'sc4'; ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey; SELECT create_distributed_table('alter_replica_table', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1026,15 +1026,15 @@ INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100); ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL; SELECT create_distributed_table('sc5.alter_replica_table', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,f) (localhost,57638,t,f) (2 rows) @@ -1051,15 +1051,15 @@ CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id); ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx; SELECT create_distributed_table('sc6.alter_replica_table', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1075,15 +1075,15 @@ CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id); ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx; SELECT create_distributed_table('alter_replica_table', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out index 90fa5b4a8..a08a2f54b 100644 --- a/src/test/regress/expected/multi_create_table_constraints.out +++ b/src/test/regress/expected/multi_create_table_constraints.out @@ -12,9 +12,9 @@ SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'appe WARNING: table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE excl_cns_append_tables @@ -27,9 +27,9 @@ SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'appe WARNING: table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test that Citus cannot distribute unique constraints that do not include @@ -59,7 +59,7 @@ CREATE TABLE ex_on_non_part_col SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). --- now show that Citus can distribute unique and EXCLUDE constraints that +-- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. 
@@ -70,9 +70,9 @@ CREATE TABLE pk_on_part_col other_col integer ); SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE uq_part_col @@ -81,9 +81,9 @@ CREATE TABLE uq_part_col other_col integer ); SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE uq_two_columns @@ -93,16 +93,16 @@ CREATE TABLE uq_two_columns UNIQUE (partition_col, other_col) ); SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365008" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_part_col ( partition_col integer, @@ -110,16 +110,16 @@ CREATE TABLE ex_on_part_col EXCLUDE (partition_col WITH =) ); SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365012" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns ( partition_col integer, @@ -127,16 +127,16 @@ CREATE TABLE ex_on_two_columns EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365016" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns_prt ( partition_col integer, @@ -144,9 +144,9 @@ CREATE TABLE ex_on_two_columns_prt EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100) ); SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); @@ -155,7 +155,7 @@ INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365020" DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_wrong_operator ( partition_col tsrange, @@ -172,18 +172,18 @@ CREATE TABLE ex_overlaps EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). -CONTEXT: while executing command on localhost:57638 --- now show that Citus can distribute unique and EXCLUDE constraints that --- include the partition column, for hash-partitioned tables. +CONTEXT: while executing command on localhost:xxxxx +-- now show that Citus can distribute unique and EXCLUDE constraints that +-- include the partition column, for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. -- These tests are for NAMED constraints. 
@@ -193,9 +193,9 @@ CREATE TABLE pk_on_part_col_named other_col integer ); SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE uq_part_col_named @@ -204,9 +204,9 @@ CREATE TABLE uq_part_col_named other_col integer ); SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE uq_two_columns_named @@ -216,16 +216,16 @@ CREATE TABLE uq_two_columns_named CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col) ); SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365036" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_part_col_named ( partition_col integer, @@ -233,16 +233,16 @@ CREATE TABLE ex_on_part_col_named CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =) ); SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365040" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns_named ( partition_col integer, @@ -250,16 +250,16 @@ CREATE TABLE ex_on_two_columns_named CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365044" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_multiple_excludes ( partition_col integer, @@ -269,20 +269,20 @@ CREATE TABLE ex_multiple_excludes CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =) ); SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365048" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365048" DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_wrong_operator_named ( partition_col tsrange, @@ -299,16 +299,16 @@ CREATE TABLE ex_overlaps_named CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365055" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- now show that Citus allows unique constraints on range-partitioned tables. CREATE TABLE uq_range_tables ( @@ -316,9 +316,9 @@ CREATE TABLE uq_range_tables other_col integer ); SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- show that CHECK constraints are distributed. 
@@ -329,22 +329,22 @@ CREATE TABLE check_example other_other_col integer CHECK (abs(other_other_col) >= 100) ); SELECT create_distributed_table('check_example', 'partition_col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'check_example_partition_col_key_365056'::regclass; - Column | Type | Definition ----------------+---------+--------------- + Column | Type | Definition +--------------------------------------------------------------------- partition_col | integer | partition_col (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass; - Constraint | Definition --------------------------------------+------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- check_example_other_col_check | CHECK (other_col >= 100) check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100) (2 rows) @@ -375,22 +375,22 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE raw_table_1 (user_id int, UNIQUE(user_id)); SELECT create_distributed_table('raw_table_1', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(user_id)); SELECT create_distributed_table('raw_table_2', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- see that the constraint exists SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; - Constraint | Definition ---------------------------+------------------------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id) (1 row) @@ -404,8 +404,8 @@ DROP TABLE raw_table_1 CASCADE; NOTICE: drop cascades to constraint raw_table_2_user_id_fkey on table raw_table_2 -- see that the constraint also dropped SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; - Constraint | Definition -------------+------------ + Constraint | Definition +--------------------------------------------------------------------- (0 rows) -- drop the table as well diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out index ed2a0e4e7..7ee7fb374 100644 --- a/src/test/regress/expected/multi_cross_shard.out +++ b/src/test/regress/expected/multi_cross_shard.out @@ -6,13 +6,13 @@ -- Create a distributed table and add data to it CREATE TABLE multi_task_table ( - id int, + id int, name varchar(20) ); SELECT create_distributed_table('multi_task_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO multi_task_table VALUES(1, 'elem_1'); @@ -20,14 +20,14 @@ INSERT INTO multi_task_table VALUES(2, 'elem_2'); INSERT INTO multi_task_table VALUES(3, 'elem_3'); -- Shouldn't log anything when the log level is 'off' SHOW 
citus.multi_task_query_log_level; - citus.multi_task_query_log_level ----------------------------------- + citus.multi_task_query_log_level +--------------------------------------------------------------------- off (1 row) SELECT * FROM multi_task_table ORDER BY 1; - id | name -----+-------- + id | name +--------------------------------------------------------------------- 1 | elem_1 2 | elem_2 3 | elem_3 @@ -38,8 +38,8 @@ SET citus.multi_task_query_log_level TO notice; SELECT * FROM multi_task_table ORDER BY 1; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. - id | name -----+-------- + id | name +--------------------------------------------------------------------- 1 | elem_1 2 | elem_2 3 | elem_3 @@ -48,8 +48,8 @@ HINT: Queries are split to multiple tasks if they have to be split into several SELECT AVG(id) AS avg_id FROM multi_task_table; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. - avg_id --------------------- + avg_id +--------------------------------------------------------------------- 2.0000000000000000 (1 row) @@ -58,7 +58,7 @@ SET citus.multi_task_query_log_level TO error; SELECT * FROM multi_task_table; ERROR: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. --- Check the log message with INSERT INTO ... SELECT +-- Check the log message with INSERT INTO ... SELECT CREATE TABLE raw_table ( id int, @@ -70,15 +70,15 @@ CREATE TABLE summary_table order_sum BIGINT ); SELECT create_distributed_table('raw_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('summary_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO raw_table VALUES(1, '15'); @@ -102,8 +102,8 @@ INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = -- Should have four rows (three rows from the query without where and the one from with where) SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table ORDER BY 1,2; - id | order_sum -----+----------- + id | order_sum +--------------------------------------------------------------------- 1 | 35 1 | 35 2 | 40 @@ -126,8 +126,8 @@ ROLLBACK; -- Should have only four rows since the transaction is rollbacked. 
SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table ORDER BY 1,2; - id | order_sum -----+----------- + id | order_sum +--------------------------------------------------------------------- 1 | 35 1 | 35 2 | 40 @@ -138,8 +138,8 @@ SELECT * FROM summary_table ORDER BY 1,2; SET citus.multi_task_query_log_level TO notice; -- Shouldn't log since it is a router select query SELECT * FROM raw_table WHERE ID = 1; - id | order_count -----+------------- + id | order_count +--------------------------------------------------------------------- 1 | 15 1 | 20 (2 rows) @@ -157,15 +157,15 @@ CREATE TABLE tt2 count bigint ); SELECT create_distributed_table('tt1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('tt2', 'name'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tt1 VALUES(1, 'Ahmet'); @@ -177,8 +177,8 @@ SET citus.task_executor_type to "task-tracker"; SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. - id | count -----+------- + id | count +--------------------------------------------------------------------- 1 | 5 2 | 15 (2 rows) diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out index 4f814656f..5c44cb1eb 100644 --- a/src/test/regress/expected/multi_data_types.out +++ b/src/test/regress/expected/multi_data_types.out @@ -16,9 +16,9 @@ SELECT run_command_on_coordinator_and_workers($cf$ IMMUTABLE RETURNS NULL ON NULL INPUT; $cf$); - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) SELECT run_command_on_coordinator_and_workers($cf$ @@ -28,9 +28,9 @@ SELECT run_command_on_coordinator_and_workers($cf$ IMMUTABLE RETURNS NULL ON NULL INPUT; $cf$); - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) -- ... use that function to create a custom equality operator... @@ -42,9 +42,9 @@ SELECT run_command_on_coordinator_and_workers($co$ HASHES ); $co$); - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) -- ... and create a custom operator family for hash indexes... @@ -52,7 +52,7 @@ CREATE OPERATOR FAMILY cats_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' +AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; @@ -74,51 +74,51 @@ CREATE TABLE composite_type_partitioned_table ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table +-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (2, '(3, 4)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (3, '(5, 6)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (4, '(7, 8)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (5, '(9, 10)'::test_composite_type); SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; - id | col -----+------- + id | col +--------------------------------------------------------------------- 4 | (7,8) (1 row) UPDATE composite_type_partitioned_table SET id = 6 WHERE col = '(7, 8)'::test_composite_type; SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; - id | col -----+------- + id | col +--------------------------------------------------------------------- 6 | (7,8) (1 row) -- create and distribute a table on enum type column CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed'); CREATE TABLE bugs ( - id integer, + id integer, status bug_status ); SELECT create_distributed_table('bugs', 'status', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table +-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO bugs VALUES (1, 'new'); INSERT INTO bugs VALUES (2, 'open'); INSERT INTO bugs VALUES (3, 'closed'); INSERT INTO bugs VALUES (4, 'closed'); INSERT INTO bugs VALUES (5, 'open'); SELECT * FROM bugs WHERE status = 'closed'::bug_status; - id | status -----+-------- + id | status +--------------------------------------------------------------------- 3 | closed 4 | closed (2 rows) @@ -126,40 +126,40 @@ SELECT * FROM bugs WHERE status = 'closed'::bug_status; UPDATE bugs SET status = 'closed'::bug_status WHERE id = 2; ERROR: modifying the partition value of rows is not allowed SELECT * FROM bugs WHERE status = 'open'::bug_status; - id | status -----+-------- + id | status +--------------------------------------------------------------------- 2 | open 5 | open (2 rows) -- create and distribute a table on varchar column -CREATE TABLE varchar_hash_partitioned_table +CREATE TABLE varchar_hash_partitioned_table ( id int, name varchar ); SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) --- execute 
INSERT, SELECT and UPDATE queries on composite_type_partitioned_table +-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason'); INSERT INTO varchar_hash_partitioned_table VALUES (2, 'Ozgun'); INSERT INTO varchar_hash_partitioned_table VALUES (3, 'Onder'); INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh'); INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco'); SELECT * FROM varchar_hash_partitioned_table WHERE id = 1; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | Jason (1 row) UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason'; SELECT * FROM varchar_hash_partitioned_table WHERE id = 6; - id | name -----+------- + id | name +--------------------------------------------------------------------- 6 | Jason (1 row) diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index bc878bcaf..2af87f91e 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -62,9 +62,9 @@ CREATE FUNCTION add(integer, integer) RETURNS integer IMMUTABLE RETURNS NULL ON NULL INPUT; SELECT create_distributed_function('add(int,int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -72,8 +72,8 @@ ALTER FUNCTION add CALLED ON NULL INPUT $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) CALLED ON NULL INPUT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -85,8 +85,8 @@ ALTER FUNCTION add RETURNS NULL ON NULL INPUT $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -96,8 +96,8 @@ ALTER FUNCTION add STRICT $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -107,8 +107,8 @@ ALTER FUNCTION add IMMUTABLE $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) IMMUTABLE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -118,8 +118,8 @@ ALTER FUNCTION 
add STABLE $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STABLE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -129,8 +129,8 @@ ALTER FUNCTION add VOLATILE $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -140,8 +140,8 @@ ALTER FUNCTION add LEAKPROOF $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) LEAKPROOF; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -151,8 +151,8 @@ ALTER FUNCTION add NOT LEAKPROOF $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) NOT LEAKPROOF; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -164,8 +164,8 @@ ALTER FUNCTION add EXTERNAL SECURITY INVOKER $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -175,8 +175,8 @@ ALTER FUNCTION add SECURITY INVOKER $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -186,8 +186,8 @@ ALTER FUNCTION add EXTERNAL SECURITY DEFINER $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY DEFINER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -197,8 +197,8 @@ ALTER FUNCTION add SECURITY DEFINER $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) 
SECURITY DEFINER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -208,8 +208,8 @@ ALTER FUNCTION add PARALLEL UNSAFE $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL UNSAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -219,8 +219,8 @@ ALTER FUNCTION add PARALLEL RESTRICTED $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL RESTRICTED; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -230,8 +230,8 @@ ALTER FUNCTION add PARALLEL SAFE $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL SAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -242,8 +242,8 @@ ALTER FUNCTION add COST 1234 $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -253,8 +253,8 @@ ALTER FUNCTION add COST 1234.5 $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.500000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -264,8 +264,8 @@ ALTER FUNCTION add SET log_min_messages = ERROR $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages = 'error'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -275,8 +275,8 @@ ALTER FUNCTION add SET log_min_messages TO DEFAULT $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages TO DEFAULT; CONTEXT: PL/pgSQL 
function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -286,8 +286,8 @@ ALTER FUNCTION add SET log_min_messages FROM CURRENT $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages FROM CURRENT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -297,8 +297,8 @@ ALTER FUNCTION add(int, int) SET TIME ZONE INTERVAL '-08:00' HOUR TO MINUTE; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET TIME ZONE INTERVAL '@ 8 hours ago'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -308,8 +308,8 @@ ALTER FUNCTION add(int, int) SET TIME ZONE '-7'; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET timezone = '-7'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -319,8 +319,8 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO 'hello '' world'; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -330,8 +330,8 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO -3.2; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -3.2; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -341,8 +341,8 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO -32; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -32; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -354,8 +354,8 @@ ALTER 
FUNCTION add(int, int) SET "citus.setting;'" TO 'hello '' world', 'second $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world', 'second '' item'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: SET citus.setting;' takes only one argument") (localhost,57638,f,"ERROR: SET citus.setting;' takes only one argument") (2 rows) @@ -365,8 +365,8 @@ ALTER FUNCTION add RESET log_min_messages $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET log_min_messages; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -376,8 +376,8 @@ ALTER FUNCTION add RESET ALL $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET ALL; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -388,8 +388,8 @@ ALTER FUNCTION add RENAME TO summation $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RENAME TO summation; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -403,8 +403,8 @@ ALTER FUNCTION summation RENAME TO add $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.summation(integer, integer) RENAME TO add; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -415,8 +415,8 @@ CREATE ROLE function_role; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers('CREATE ROLE function_role'); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -426,8 +426,8 @@ ALTER FUNCTION add OWNER TO function_role $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO function_role; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -437,8 +437,8 @@ ALTER FUNCTION add OWNER TO missing_role $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO missing_role; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") (2 rows) @@ -449,8 +449,8 @@ ALTER FUNCTION add SET SCHEMA public $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET SCHEMA public; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -462,8 +462,8 @@ ALTER FUNCTION public.add SET SCHEMA function_tests $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION public.add(integer, integer) SET SCHEMA function_tests; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -474,8 +474,8 @@ ALTER FUNCTION add DEPENDS ON EXTENSION citus $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) DEPENDS ON EXTENSION citus; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -486,8 +486,8 @@ ALTER FUNCTION pg_catalog.get_shard_id_for_distribution_column(table_name regcla $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION pg_catalog.get_shard_id_for_distribution_column(table_name regclass, distribution_value "any") PARALLEL SAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -496,8 +496,8 @@ CONTEXT: 
PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE SELECT deparse_test($cmd$ DROP FUNCTION add(int,int); $cmd$); - deparse_test ------------------------------------------------------ + deparse_test +--------------------------------------------------------------------- DROP FUNCTION function_tests.add(integer, integer); (1 row) @@ -507,8 +507,8 @@ ALTER FUNCTION add volatile leakproof SECURITY DEFINER PARALLEL unsafe; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE LEAKPROOF SECURITY DEFINER PARALLEL UNSAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -520,8 +520,8 @@ DROP FUNCTION missing_function(int, text); $cmd$); INFO: Propagating deparsed query: DROP FUNCTION missing_function(pg_catalog.int4,text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: function missing_function(integer, text) does not exist") (localhost,57638,f,"ERROR: function missing_function(integer, text) does not exist") (2 rows) @@ -533,8 +533,8 @@ DROP FUNCTION IF EXISTS missing_function(int, text); $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_function(pg_catalog.int4,text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers -------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -544,8 +544,8 @@ DROP FUNCTION IF EXISTS missing_schema.missing_function(int,float); $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_schema.missing_function(pg_catalog.int4,pg_catalog.float8); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers -------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -555,8 +555,8 @@ DROP FUNCTION IF EXISTS missing_func_without_args; $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_func_without_args; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers -------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -568,8 +568,8 @@ SELECT run_command_on_workers($$ CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN"; CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; $$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -582,15 +582,15 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS 
TEXT AS $$ SELECT 'Overloaded function called with param: ' || $1 $$ LANGUAGE SQL; SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -598,8 +598,8 @@ ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2" $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -610,8 +610,8 @@ DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!?!"(),"CiTuS.TeeN"."TeeNFunCT10N.1!? $cmd$); INFO: Propagating deparsed query: DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!?!"(), "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers -------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -621,9 +621,9 @@ CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; SELECT create_distributed_function('func_default_param(INT)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -631,8 +631,8 @@ ALTER FUNCTION func_default_param RENAME TO func_with_default_param; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_default_param(param integer) RENAME TO func_with_default_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -642,9 +642,9 @@ CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT) AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; SELECT create_distributed_function('func_out_param(INT)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -652,8 +652,8 @@ ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_out_param(param integer, OUT result text) RENAME TO func_in_and_out_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers 
+--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -666,9 +666,9 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('square(NUMERIC)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -676,8 +676,8 @@ ALTER FUNCTION square SET search_path TO DEFAULT; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.square(INOUT a numeric) SET search_path TO DEFAULT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -697,9 +697,9 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('sum_avg(NUMERIC[])'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -707,8 +707,8 @@ ALTER FUNCTION sum_avg COST 10000; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.sum_avg(VARIADIC list numeric[], OUT total numeric, OUT average numeric) COST 10000.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -719,9 +719,9 @@ CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT) AS $$ SELECT param.x + param.y $$ LANGUAGE SQL; SELECT create_distributed_function('func_custom_param(intpair)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -729,8 +729,8 @@ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_param(param function_tests.intpair, OUT total integer) RENAME TO func_with_custom_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -741,9 +741,9 @@ CREATE FUNCTION func_returns_table(IN count INT) AS $$ SELECT i,i FROM generate_series(1,count) i $$ LANGUAGE SQL; SELECT create_distributed_function('func_returns_table(INT)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ @@ -751,8 +751,8 @@ ALTER FUNCTION func_returns_table ROWS 100; $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(count integer) ROWS 100.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE - 
deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -767,8 +767,8 @@ SELECT run_command_on_workers($$ DROP SCHEMA "CiTUS.TEEN2" CASCADE; DROP SCHEMA function_tests CASCADE; $$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out index 1e370a15e..f31578f38 100644 --- a/src/test/regress/expected/multi_deparse_procedure.out +++ b/src/test/regress/expected/multi_deparse_procedure.out @@ -17,13 +17,13 @@ -- SET configuration_parameter FROM CURRENT -- RESET configuration_parameter -- RESET ALL --- +-- -- DROP PROCEDURE [ IF EXISTS ] name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] [, ...] -- [ CASCADE | RESTRICT ] --- +-- -- Please note that current deparser does not return errors on some invalid queries. --- --- For example CALLED ON NULL INPUT action is valid only for FUNCTIONS, but we still +-- +-- For example CALLED ON NULL INPUT action is valid only for FUNCTIONS, but we still -- allow deparsing them here. SET citus.next_shard_id TO 20030000; SET citus.enable_ddl_propagation TO off; @@ -39,7 +39,7 @@ CREATE FUNCTION deparse_and_run_on_workers(text) RETURNS SETOF record AS $fnc$ WITH deparsed_query AS ( SELECT deparse_test($1) qualified_query ) - SELECT run_command_on_workers(qualified_query) FROM deparsed_query d + SELECT run_command_on_workers(qualified_query) FROM deparsed_query d $fnc$ LANGUAGE SQL; -- Create a simple PROCEDURE and distribute it @@ -50,16 +50,16 @@ BEGIN END; $proc$; SELECT create_distributed_function('raise_info(text)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info CALLED ON NULL INPUT $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -67,8 +67,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RETURNS NULL ON NULL INPUT $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -76,8 +76,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STRICT $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ 
-85,8 +85,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info IMMUTABLE $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -94,8 +94,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STABLE $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -103,8 +103,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info VOLATILE $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -112,8 +112,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info LEAKPROOF $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -121,8 +121,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info NOT LEAKPROOF $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -130,8 +130,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY INVOKER $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -139,8 +139,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -148,8 +148,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY DEFINER $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -157,8 +157,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER 
PROCEDURE raise_info SECURITY DEFINER $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -166,8 +166,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL UNSAFE $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -175,8 +175,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL RESTRICTED $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -184,8 +184,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL SAFE $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -194,8 +194,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234 $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -203,8 +203,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234.5 $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -212,8 +212,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info ROWS 10 $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -221,8 +221,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info ROWS 10.8 $cmd$); - deparse_and_run_on_workers -------------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid 
attribute in procedure definition") (2 rows) @@ -230,8 +230,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER SET client_min_messages TO warning; $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -239,8 +239,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages = ERROR $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -248,8 +248,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages TO DEFAULT $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -257,8 +257,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages FROM CURRENT $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -266,8 +266,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET log_min_messages $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -275,8 +275,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET ALL $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -285,8 +285,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RENAME TO summation; $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -295,8 +295,8 @@ ALTER PROCEDURE raise_info RENAME TO summation; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE summation RENAME TO raise_info; $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -306,8 +306,8 @@ CREATE ROLE procedure_role; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -315,8 +315,8 @@ SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO procedure_role $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -324,8 +324,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO missing_role $cmd$); - deparse_and_run_on_workers --------------------------------------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") (2 rows) @@ -334,8 +334,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET SCHEMA public; $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -344,8 +344,8 @@ ALTER PROCEDURE raise_info SET SCHEMA public; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -354,8 +354,8 @@ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info DEPENDS ON EXTENSION citus $cmd$); - deparse_and_run_on_workers ---------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -363,8 +363,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE raise_info(text); $cmd$); - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -373,8 +373,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_PROCEDURE(int, text); $cmd$); - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -382,8 +382,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float); $cmd$); - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") 
(localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -391,8 +391,8 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float) CASCADE; $cmd$); - deparse_and_run_on_workers --------------------------------------- + deparse_and_run_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -402,8 +402,8 @@ SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; DROP ROLE procedure_role; SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); - run_command_on_workers ---------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/multi_deparse_shard_query.out b/src/test/regress/expected/multi_deparse_shard_query.out index 7f2369914..32a97eafa 100644 --- a/src/test/regress/expected/multi_deparse_shard_query.out +++ b/src/test/regress/expected/multi_deparse_shard_query.out @@ -20,9 +20,9 @@ CREATE TABLE raw_events_1 event_at date DEfAULT now() ); SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create the first table @@ -38,14 +38,14 @@ CREATE TABLE raw_events_2 event_at date DEfAULT now() ); SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE aggregated_events (tenant_id bigint, - sum_value_1 bigint, + sum_value_1 bigint, average_value_2 float, average_value_3 float, sum_value_4 bigint, @@ -53,9 +53,9 @@ CREATE TABLE aggregated_events average_value_6 int, rollup_hour date); SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- start with very simple examples on a single table @@ -64,9 +64,9 @@ INSERT INTO raw_events_1 SELECT * FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' @@ -77,9 +77,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- now that shuffle columns a bit on a single table @@ -91,9 +91,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS 
value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- same test on two different tables @@ -105,9 +105,9 @@ FROM raw_events_2; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- lets do some simple aggregations @@ -121,9 +121,9 @@ GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- also some subqueries, JOINS with a complicated target lists @@ -138,9 +138,9 @@ WHERE raw_events_1.tenant_id = raw_events_2.tenant_id; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- join with group by @@ -154,9 +154,9 @@ WHERE raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- a more complicated JOIN @@ -174,9 +174,9 @@ ORDER BY r2.event_at DESC; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- queries with CTEs are supported @@ -191,9 +191,9 @@ GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) 
INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' @@ -207,9 +207,9 @@ GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' @@ -226,9 +226,9 @@ WITH RECURSIVE hierarchy as ( SELECT * FROM hierarchy WHERE LEVEL <= 2; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' @@ -239,9 +239,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- many filters suffled @@ -252,9 +252,9 @@ SELECT value_3, value_2, tenant_id WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision))) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(E' @@ -264,9 +264,9 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id WHERE event_at = now(); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(E' @@ -277,9 +277,9 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6 GROUP 
BY event_at, value_7, value_6; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' @@ -298,34 +298,34 @@ SELECT raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_7, value_1, tenant_id) -SELECT +SELECT value_7, value_1, tenant_id FROM - (SELECT + (SELECT tenant_id, value_2 as value_7, value_1 FROM raw_events_2 ) as foo '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5) -SELECT +SELECT sum(value_1), tenant_id, sum(value_5::bigint) FROM - (SELECT + (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM raw_events_2, raw_events_1 @@ -336,43 +336,43 @@ GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4) -SELECT +SELECT tenant_id, value_1, value_2, value_3, value_4 FROM - (SELECT + (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) SELECT 
deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3) -SELECT +SELECT * FROM - (SELECT + (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- use a column multiple times @@ -386,9 +386,9 @@ ORDER BY value_2, value_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) -- test dropped table as well @@ -401,8 +401,8 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test --------------------------- - + deparse_shard_query_test +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_distributed_transaction_id.out b/src/test/regress/expected/multi_distributed_transaction_id.out index d2c3148b8..e622ff619 100644 --- a/src/test/regress/expected/multi_distributed_transaction_id.out +++ b/src/test/regress/expected/multi_distributed_transaction_id.out @@ -11,30 +11,30 @@ SET TIME ZONE 'PST8PDT'; -- should return uninitialized values if not in a transaction SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp ----------------------------+--------------------+------------------- - 0 | 0 | + initiator_node_identifier | transaction_number | transaction_stamp +--------------------------------------------------------------------- + 0 | 0 | (1 row) BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
+--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(50, 50, '2016-01-01 00:00:00+0'); - assign_distributed_transaction_id ------------------------------------ - + assign_distributed_transaction_id +--------------------------------------------------------------------- + (1 row) -- see the assigned value SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 50 | 50 | Thu Dec 31 16:00:00 2015 PST | t (1 row) @@ -44,8 +44,8 @@ ERROR: the backend has already been assigned a transaction id ROLLBACK; -- since the transaction finished, we should see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -53,16 +53,16 @@ SELECT initiator_node_identifier, transaction_number, transaction_stamp, (proces BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id ------------------------------------ - + assign_distributed_transaction_id +--------------------------------------------------------------------- + (1 row) SELECT 5 / 0; @@ -70,51 +70,51 @@ ERROR: division by zero COMMIT; -- since the transaction errored, we should see the uninitialized values again SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
+--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- we should also see that a new connection means an uninitialized transaction id BEGIN; SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id ------------------------------------ - + assign_distributed_transaction_id +--------------------------------------------------------------------- + (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 52 | 52 | Wed Dec 31 16:00:00 2014 PST | t (1 row) \c - - - :master_port SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now show that PREPARE resets the distributed transaction id BEGIN; SELECT assign_distributed_transaction_id(120, 120, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id ------------------------------------ - + assign_distributed_transaction_id +--------------------------------------------------------------------- + (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? +--------------------------------------------------------------------- 120 | 120 | Wed Dec 31 16:00:00 2014 PST | t (1 row) PREPARE TRANSACTION 'dist_xact_id_test'; -- after the prepare we should see that transaction id is cleared SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- + initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
+--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -130,9 +130,9 @@ $$ LANGUAGE sql; -- force the transaction ID to be used in a parallel plan BEGIN; SELECT assign_distributed_transaction_id(50, 1234567, '2016-01-01 00:00:00+0'); - assign_distributed_transaction_id ------------------------------------ - + assign_distributed_transaction_id +--------------------------------------------------------------------- + (1 row) -- create >8MB table @@ -144,8 +144,8 @@ SET LOCAL max_parallel_workers_per_gather TO 2; SET LOCAL parallel_tuple_cost TO 0; EXPLAIN (COSTS OFF) SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); - QUERY PLAN -------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Gather Workers Planned: 1 -> Parallel Seq Scan on parallel_id_test @@ -153,8 +153,8 @@ SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); (4 rows) SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); - a ---------- + a +--------------------------------------------------------------------- 1234567 1234567 (2 rows) diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index ebf001275..1e37ea6d1 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -51,9 +51,9 @@ CREATE TABLE events_hash ( name text ); SELECT create_distributed_table('events_hash', 'name', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- set shardstate of one replication from each shard to 0 (invalid value) @@ -61,15 +61,15 @@ UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540 AND groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port); -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); - load_shard_id_array -------------------------------- + load_shard_id_array +--------------------------------------------------------------------- {540000,540001,540002,540003} (1 row) -- should see array with first shard range SELECT load_shard_interval_array(540000, 0); - load_shard_interval_array ---------------------------- + load_shard_interval_array +--------------------------------------------------------------------- {-2147483648,-1073741825} (1 row) @@ -80,15 +80,15 @@ CREATE TABLE events_range ( name text ); SELECT master_create_distributed_table('events_range', 'name', 'range'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create empty shard SELECT master_create_empty_shard('events_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 540004 (1 row) @@ -97,46 +97,46 @@ UPDATE pg_dist_shard SET shardmaxvalue = 'Zebra' WHERE shardid = 540004; SELECT load_shard_interval_array(540004, ''::text); - load_shard_interval_array ---------------------------- + load_shard_interval_array +--------------------------------------------------------------------- {Aardvark,Zebra} (1 row) -- should see error for non-existent shard SELECT 
load_shard_interval_array(540005, 0); -ERROR: could not find valid entry for shard 540005 +ERROR: could not find valid entry for shard xxxxx -- should see two placements SELECT load_shard_placement_array(540001, false); - load_shard_placement_array ------------------------------------ - {localhost:57637,localhost:57638} + load_shard_placement_array +--------------------------------------------------------------------- + {localhost:xxxxx,localhost:xxxxx} (1 row) -- only one of which is finalized SELECT load_shard_placement_array(540001, true); - load_shard_placement_array ----------------------------- - {localhost:57637} + load_shard_placement_array +--------------------------------------------------------------------- + {localhost:xxxxx} (1 row) -- should see error for non-existent shard SELECT load_shard_placement_array(540001, false); - load_shard_placement_array ------------------------------------ - {localhost:57637,localhost:57638} + load_shard_placement_array +--------------------------------------------------------------------- + {localhost:xxxxx,localhost:xxxxx} (1 row) -- should see column id of 'name' SELECT partition_column_id('events_hash'); - partition_column_id ---------------------- + partition_column_id +--------------------------------------------------------------------- 2 (1 row) -- should see hash partition type and fail for non-distributed tables SELECT partition_type('events_hash'); - partition_type ----------------- + partition_type +--------------------------------------------------------------------- h (1 row) @@ -144,27 +144,27 @@ SELECT partition_type('pg_type'); ERROR: relation pg_type is not distributed -- should see true for events_hash, false for others SELECT is_distributed_table('events_hash'); - is_distributed_table ----------------------- + is_distributed_table +--------------------------------------------------------------------- t (1 row) SELECT is_distributed_table('pg_type'); - is_distributed_table ----------------------- + is_distributed_table +--------------------------------------------------------------------- f (1 row) SELECT is_distributed_table('pg_dist_shard'); - is_distributed_table ----------------------- + is_distributed_table +--------------------------------------------------------------------- f (1 row) -- test underlying column name-id translation SELECT column_name_to_column_id('events_hash', 'name'); - column_name_to_column_id --------------------------- + column_name_to_column_id +--------------------------------------------------------------------- 2 (1 row) @@ -181,8 +181,8 @@ DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_range'::regclass; -- verify that an eager load shows them missing SELECT load_shard_id_array('events_hash'); - load_shard_id_array ---------------------- + load_shard_id_array +--------------------------------------------------------------------- {} (1 row) @@ -198,8 +198,8 @@ VALUES ('customers'::regclass, 'h', column_name_to_column('customers'::regclass, 'id')); SELECT partmethod, column_to_column_name(logicalrelid, partkey) FROM pg_dist_partition WHERE logicalrelid = 'customers'::regclass; - partmethod | column_to_column_name -------------+----------------------- + partmethod | column_to_column_name +--------------------------------------------------------------------- h | id (1 row) @@ -209,9 +209,9 @@ ERROR: not a valid column SELECT column_to_column_name('customers',''); ERROR: not a valid column SELECT column_to_column_name('pg_dist_node'::regclass, NULL); - column_to_column_name 
------------------------ - + column_to_column_name +--------------------------------------------------------------------- + (1 row) SELECT column_to_column_name('pg_dist_node'::regclass,'{FROMEXPR :fromlist ({RANGETBLREF :rtindex 1 }) :quals <>}'); @@ -226,8 +226,8 @@ SELECT create_monolithic_shard_row('customers') AS new_shard_id \gset SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; - shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- + shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- t | -2147483648 | 2147483647 (1 row) @@ -236,14 +236,14 @@ WHERE shardid = :new_shard_id; BEGIN; -- pick up a shard lock and look for it in pg_locks SELECT acquire_shared_shard_lock(5); - acquire_shared_shard_lock ---------------------------- - + acquire_shared_shard_lock +--------------------------------------------------------------------- + (1 row) SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; - objid | mode --------+----------- + objid | mode +--------------------------------------------------------------------- 5 | ShareLock (1 row) @@ -251,8 +251,8 @@ SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; COMMIT; -- lock should be gone now SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -260,55 +260,55 @@ SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table1(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table1', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY get_shardid_test_table1 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 1); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540006 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540009 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540007 (1 row) -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table1_540006; - column1 | column2 ----------+--------- + column1 | column2 +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM get_shardid_test_table1_540009; - column1 | column2 ----------+--------- + column1 | column2 +--------------------------------------------------------------------- 2 | 2 (1 row) SELECT * FROM get_shardid_test_table1_540007; - column1 | column2 ----------+--------- + column1 | column2 +--------------------------------------------------------------------- 3 | 3 (1 row) \c - - - :master_port -- test 
non-existing value SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540007 (1 row) @@ -316,35 +316,35 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table2(column1 text[], column2 int); SELECT create_distributed_table('get_shardid_test_table2', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY get_shardid_test_table2 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c}'); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540013 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}'); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540011 (1 row) -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table2_540013; - column1 | column2 ----------+--------- + column1 | column2 +--------------------------------------------------------------------- {a,b,c} | 1 (1 row) SELECT * FROM get_shardid_test_table2_540011; - column1 | column2 ----------+--------- + column1 | column2 +--------------------------------------------------------------------- {d,e,f} | 2 (1 row) @@ -364,9 +364,9 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); ERROR: relation is not distributed -- test append distributed table SELECT create_distributed_table('get_shardid_test_table3', 'column1', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); @@ -374,73 +374,73 @@ ERROR: finding shard id of given distribution value is only supported for hash -- test reference table; CREATE TABLE get_shardid_test_table4(column1 int, column2 int); SELECT create_reference_table('get_shardid_test_table4'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- test NULL distribution column value for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4'); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', NULL); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540014 (1 row) -- test different data types for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 1); - get_shard_id_for_distribution_column 
--------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 'a'); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c}'); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540014 (1 row) -- test range distributed table CREATE TABLE get_shardid_test_table5(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table5', 'column1', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create worker shards SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 540015 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 540016 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 540017 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 540018 (1 row) @@ -450,55 +450,55 @@ UPDATE pg_dist_shard SET shardminvalue = 1001, shardmaxvalue = 2000 WHERE shardi UPDATE pg_dist_shard SET shardminvalue = 2001, shardmaxvalue = 3000 WHERE shardid = 540017; UPDATE pg_dist_shard SET shardminvalue = 3001, shardmaxvalue = 4000 WHERE shardid = 540018; SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 5); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540015 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 1111); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540016 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 2689); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540017 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 3248); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 540018 (1 row) -- test non-existing value for range distributed tables SELECT 
get_shard_id_for_distribution_column('get_shardid_test_table5', 4001); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 0 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); - get_shard_id_for_distribution_column --------------------------------------- + get_shard_id_for_distribution_column +--------------------------------------------------------------------- 0 (1 row) SET citus.shard_count TO 2; CREATE TABLE events_table_count (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('events_table_count', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE users_table_count (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('users_table_count', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT relation_count_in_query($$-- we can support arbitrary subqueries within UNIONs @@ -572,8 +572,8 @@ GROUP BY types ORDER BY types;$$); - relation_count_in_query -------------------------- + relation_count_in_query +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index 384536c2e..6419ebd1a 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -5,9 +5,9 @@ SET citus.next_shard_id TO 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- this emits a NOTICE message for every table we are dropping with our CASCADE. It would @@ -20,34 +20,34 @@ RESET client_min_messages; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- verify that a table can be created after the extension has been dropped and recreated CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT * FROM testtableddl; - somecol | distributecol ----------+--------------- + somecol | distributecol +--------------------------------------------------------------------- (0 rows) DROP TABLE testtableddl; diff --git a/src/test/regress/expected/multi_dropped_column_aliases.out b/src/test/regress/expected/multi_dropped_column_aliases.out index 3744d71db..c3f250d62 100644 --- a/src/test/regress/expected/multi_dropped_column_aliases.out +++ b/src/test/regress/expected/multi_dropped_column_aliases.out @@ -2,14 +2,14 @@ -- table schema is modified via ALTER statements. SET citus.next_shard_id TO 620000; SELECT count(*) FROM customer; - count -------- + count +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment +--------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref (2 rows) @@ -17,37 +17,37 @@ SELECT * FROM customer LIMIT 2; ALTER TABLE customer ADD COLUMN new_column1 INTEGER; ALTER TABLE customer ADD COLUMN new_column2 INTEGER; SELECT count(*) FROM customer; - count -------- + count +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+-------------+------------- - 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | - 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref | | + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 +--------------------------------------------------------------------- + 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | + 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. 
blithely ironic theodolites integrate boldly: caref | | (2 rows) ALTER TABLE customer DROP COLUMN new_column1; ALTER TABLE customer DROP COLUMN new_column2; SELECT count(*) FROM customer; - count -------- + count +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment +--------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref (2 rows) -- Verify joins work with dropped columns. SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey; - count -------- + count +--------------------------------------------------------------------- 1956 (1 row) diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index a1f19a3fe..251315e31 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -42,7 +42,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -61,7 +61,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -96,7 +96,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 2", "Tasks": [ { - "Node": "host=localhost port=57638 dbname=regression", + "Node": "host=localhost port=xxxxx dbname=regression", "Remote Plan": [ [ { @@ -171,7 +171,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 2 - host=localhost port=57638 dbname=regression + host=localhost port=xxxxx dbname=regression @@ -216,40 +216,40 @@ t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: +- Plan: Node Type: "Sort" Parallel Aware: false - Sort Key: + Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false - Group Key: + Group Key: - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 2 Tasks Shown: "One of 2" - Tasks: - - Node: "host=localhost port=57638 dbname=regression" - Remote Plan: - - Plan: + Tasks: + - Node: "host=localhost port=xxxxx dbname=regression" + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false - Group Key: + Group Key: - "l_quantity" - Plans: + 
Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -268,7 +268,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -285,7 +285,7 @@ Sort (actual rows=50 loops=1) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1) @@ -299,7 +299,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_290000 lineitem @@ -316,7 +316,7 @@ Limit Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: lineitem.l_quantity @@ -333,7 +333,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Insert on lineitem_290000 citus_table_alias -> Values Scan on "*VALUES*" -- Test update @@ -345,7 +345,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) @@ -360,7 +360,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_290000 lineitem (actual rows=0 loops=1) -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=0 loops=1) Index Cond: (l_orderkey = 1) @@ -375,7 +375,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) @@ -402,7 +402,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ @@ -419,7 +419,7 @@ Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on lineitem_290000 lineitem -- Test having EXPLAIN (COSTS FALSE, VERBOSE TRUE) @@ -433,7 +433,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) -> Seq Scan on public.lineitem_290000 lineitem @@ -452,7 +452,7 @@ HashAggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: 
host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_quantity, l_quantity Group Key: lineitem.l_quantity @@ -489,7 +489,7 @@ Aggregate Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) @@ -571,7 +571,7 @@ HashAggregate Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: subquery_top.hasdone -> Sort @@ -686,7 +686,7 @@ Sort Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: subquery_top.count_pay -> Sort @@ -785,7 +785,7 @@ Limit Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (max(users.lastseen)) DESC @@ -814,7 +814,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -832,19 +832,19 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360042 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360043 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part EXPLAIN (COSTS FALSE) @@ -855,12 +855,12 @@ Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360042 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) @@ -871,19 +871,19 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_hash_part_360042 
lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_hash_part_360043 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) @@ -899,12 +899,12 @@ Sort (actual rows=50 loops=1) Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem (actual rows=6000 loops=1) @@ -919,7 +919,7 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Hash Join Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) @@ -935,7 +935,7 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_hash_part_360041 lineitem_hash_part -> Hash Join Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) @@ -951,7 +951,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1089,21 +1089,21 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -- Plan: +- Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 1 Tasks Shown: "None, not supported for re-partition queries" - Dependent Jobs: + Dependent Jobs: - Map Task Count: 2 Merge Task Count: 1 -- test parallel aggregates @@ -1128,7 +1128,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_290000 lineitem -- ensure EXPLAIN EXECUTE doesn't crash @@ -1140,7 +1140,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1151,7 +1151,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 
lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS @@ -1162,7 +1162,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1174,7 +1174,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -- test explain in a transaction with alter table to test we use right connections @@ -1194,7 +1194,7 @@ Custom Scan (Citus INSERT ... SELECT via coordinator) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Seq Scan on orders_hash_part_360045 orders_hash_part SELECT true AS valid FROM explain_json($$ @@ -1211,7 +1211,7 @@ Custom Scan (Citus INSERT ... SELECT via coordinator) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Seq Scan on orders_hash_part_360045 orders_hash_part EXPLAIN (COSTS OFF) @@ -1262,7 +1262,7 @@ SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey) ORDER BY s; Custom Scan (Citus Adaptive) Output: remote_scan.l_orderkey - -> Distributed Subplan 60_1 + -> Distributed Subplan XXX_1 -> HashAggregate Output: remote_scan.l_orderkey Group Key: remote_scan.l_orderkey @@ -1271,20 +1271,20 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_orderkey Group Key: lineitem_hash_part.l_orderkey -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment - -> Distributed Subplan 60_2 + -> Distributed Subplan XXX_2 -> Function Scan on pg_catalog.generate_series s Output: s Function Call: generate_series(1, 10) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Merge Join Output: intermediate_result_1.l_orderkey, intermediate_result.s Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey) @@ -1293,13 +1293,13 @@ Custom Scan (Citus Adaptive) Sort Key: intermediate_result.s -> Function Scan on pg_catalog.read_intermediate_result intermediate_result Output: intermediate_result.s - Function Call: read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) + Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) -> Sort Output: intermediate_result_1.l_orderkey Sort Key: intermediate_result_1.l_orderkey -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1 Output: intermediate_result_1.l_orderkey - Function Call: read_intermediate_result('60_1'::text, 'binary'::citus_copy_format) + Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) SELECT true AS valid FROM explain_json($$ WITH result AS ( 
SELECT l_quantity, count(*) count_quantity FROM lineitem diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 7a5cc7137..9857bd288 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -31,8 +31,8 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? -------------+----------+---------- + datname | ?column? | ?column? +--------------------------------------------------------------------- regression | t | t (1 row) @@ -45,8 +45,8 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -114,8 +114,8 @@ ALTER EXTENSION citus UPDATE TO '9.1-1'; ALTER EXTENSION citus UPDATE TO '9.2-1'; -- show running version SHOW citus.version; - citus.version ---------------- + citus.version +--------------------------------------------------------------------- 9.2devel (1 row) @@ -128,8 +128,8 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -152,8 +152,8 @@ CREATE TABLE version_mismatch_table(column1 int); INSERT INTO version_mismatch_table(column1) VALUES(5); -- Test SELECT SELECT * FROM version_mismatch_table ORDER BY column1; - column1 ---------- + column1 +--------------------------------------------------------------------- 0 1 2 @@ -168,10 +168,10 @@ SELECT d.datname as "Name", pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" FROM pg_catalog.pg_database d ORDER BY 1; - Name | Owner | Access privileges -------------+----------+----------------------- - postgres | postgres | - regression | postgres | + Name | Owner | Access privileges +--------------------------------------------------------------------- + postgres | postgres | + regression | postgres | template0 | postgres | =c/postgres + | | postgres=CTc/postgres template1 | postgres | =c/postgres + @@ -216,8 +216,8 @@ ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem \d List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -230,8 +230,8 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? -------------+----------+---------- + datname | ?column? | ?column? +--------------------------------------------------------------------- regression | t | t (1 row) @@ -271,8 +271,8 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? ----------+----------+---------- + datname | ?column? | ?column? 
+--------------------------------------------------------------------- another | t | t (1 row) @@ -308,8 +308,8 @@ SELECT * FROM test_deamon.maintenance_deamon_died('another'); - maintenance_deamon_died -------------------------- + maintenance_deamon_died +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_configure_followers.out b/src/test/regress/expected/multi_follower_configure_followers.out index 27d157ef2..de87d311c 100644 --- a/src/test/regress/expected/multi_follower_configure_followers.out +++ b/src/test/regress/expected/multi_follower_configure_followers.out @@ -3,8 +3,8 @@ ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -13,8 +13,8 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -22,8 +22,8 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_dml.out b/src/test/regress/expected/multi_follower_dml.out index 6fdb91e71..08e84a8b4 100644 --- a/src/test/regress/expected/multi_follower_dml.out +++ b/src/test/regress/expected/multi_follower_dml.out @@ -1,9 +1,9 @@ \c - - - :master_port CREATE TABLE the_table (a int, b int, z bigserial); SELECT create_distributed_table('the_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE local (a int, b int); @@ -16,22 +16,22 @@ DETAIL: the database is in recovery mode SET citus.writable_standby_coordinator TO on; INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); SELECT * FROM the_table; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- 1 | 2 | 2 (1 row) UPDATE the_table SET z = 3 WHERE a = 1; SELECT * FROM the_table; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- 1 | 2 | 3 (1 row) DELETE FROM the_table WHERE a = 1; SELECT * FROM the_table; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- (0 rows) -- drawing from a sequence is not possible @@ -47,8 +47,8 @@ ERROR: cannot assign TransactionIds during recovery SET citus.multi_shard_commit_protocol TO '1pc'; INSERT INTO the_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7); SELECT * FROM the_table ORDER BY a; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 (2 rows) @@ -56,8 +56,8 @@ SELECT * FROM the_table ORDER BY a; -- modifying CTEs are possible WITH del AS (DELETE FROM the_table RETURNING *) SELECT * FROM del ORDER BY a; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 (2 rows) @@ -65,8 
+65,8 @@ SELECT * FROM del ORDER BY a; -- COPY is possible in 1PC mode COPY the_table (a, b, z) FROM STDIN WITH CSV; SELECT * FROM the_table ORDER BY a; - a | b | z -----+----+---- + a | b | z +--------------------------------------------------------------------- 10 | 10 | 10 11 | 11 | 11 (2 rows) @@ -83,8 +83,8 @@ BEGIN; INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); ROLLBACK; SELECT * FROM the_table ORDER BY a; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- (0 rows) -- we should still disallow writes to local tables @@ -103,8 +103,8 @@ INSERT INTO the_table (a, b, z) VALUES (1, 2, 3); ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' SELECT * FROM the_table ORDER BY a; - a | b | z ----+---+--- + a | b | z +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_follower_select_statements.out b/src/test/regress/expected/multi_follower_select_statements.out index b43b172ec..3f4340d61 100644 --- a/src/test/regress/expected/multi_follower_select_statements.out +++ b/src/test/regress/expected/multi_follower_select_statements.out @@ -1,22 +1,22 @@ \c - - - :master_port -- do some setup SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE the_table (a int, b int); SELECT create_distributed_table('the_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO the_table (a, b) VALUES (1, 1); @@ -27,9 +27,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; @@ -37,8 +37,8 @@ INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; -- is still in the default cluster and will send queries to the primary nodes \c - - - :follower_master_port SELECT * FROM the_table; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) @@ -48,8 +48,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -60,30 +60,30 @@ order by s_i_id; -- (this is :follower_master_port but substitution doesn't work here) \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" SELECT * FROM the_table; -ERROR: node group 1 does not have a secondary node +ERROR: node group does not have a secondary node -- add the secondary nodes and try again, the SELECT statement should work this time \c - - - :master_port SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary'); - ?column? 
----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" -- now that we've added secondaries this should work SELECT * FROM the_table; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) @@ -93,8 +93,8 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount ---------+------------ + s_i_id | ordercount +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -106,8 +106,8 @@ FROM master_get_active_worker_nodes() ORDER BY node_name, node_port; - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 9071 localhost | 9072 (2 rows) @@ -117,21 +117,21 @@ ORDER BY \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" -- there are no secondary nodes in this cluster, so this should fail! SELECT * FROM the_table; -ERROR: there is a shard placement in node group 1 but there are no nodes in that group +ERROR: there is a shard placement in node group but there are no nodes in that group select s_i_id, sum(s_order_cnt) as ordercount from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; -ERROR: there is a shard placement in node group 1 but there are no nodes in that group +ERROR: there is a shard placement in node group but there are no nodes in that group -- now move the secondary nodes into the new cluster and see that the follower, finally -- correctly configured, can run select queries involving them \c - - - :master_port UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary'; \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SELECT * FROM the_table; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_follower_task_tracker.out b/src/test/regress/expected/multi_follower_task_tracker.out index 8b09eb664..f6b75bd40 100644 --- a/src/test/regress/expected/multi_follower_task_tracker.out +++ b/src/test/regress/expected/multi_follower_task_tracker.out @@ -2,9 +2,9 @@ -- do some setup CREATE TABLE tab(a int, b int); SELECT create_distributed_table('tab', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tab (a, b) VALUES (1, 1); @@ -12,8 +12,8 @@ INSERT INTO tab (a, b) VALUES (1, 2); \c - - - :follower_master_port RESET citus.task_executor_type; SELECT * FROM tab; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out index 37f717a43..2b612124e 100644 --- 
a/src/test/regress/expected/multi_foreign_key.out +++ b/src/test/regress/expected/multi_foreign_key.out @@ -7,9 +7,9 @@ SET citus.shard_count TO 32; -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test foreign constraint creation with not supported parameters @@ -47,9 +47,9 @@ HINT: Please change "citus.shard_replication_factor to 1". To learn more about DROP TABLE self_referencing_table; CREATE TABLE self_referencing_table(id int, ref_id int, PRIMARY KEY (id, ref_id)); SELECT create_distributed_table('self_referencing_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE self_referencing_table ADD CONSTRAINT fkey FOREIGN KEY(id,ref_id) REFERENCES self_referencing_table(id, ref_id); @@ -86,14 +86,14 @@ HINT: Please change "citus.shard_replication_factor to 1". To learn more about DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint creation on append and range distributed tables --- foreign keys are supported either in between distributed tables including the +-- foreign keys are supported either in between distributed tables including the -- distribution column or from distributed tables to reference tables. SET citus.shard_replication_factor TO 1; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); @@ -104,9 +104,9 @@ DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int,FOREIGN KEY (id) REFERENCES referenced_table(id)); @@ -119,32 +119,32 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); -ERROR: insert or update on table "referencing_table_1350129" violates foreign key constraint 
"referencing_table_ref_id_fkey_1350129" -DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350097". -CONTEXT: while executing command on localhost:57638 +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350097" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" on table "referencing_table_1350129" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350129". -CONTEXT: while executing command on localhost:57638 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; @@ -154,8 +154,8 @@ INSERT INTO referencing_table VALUES(2, 2); TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) -- drop table for next tests @@ -166,15 +166,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- single shard cascading delete @@ -182,13 +182,13 @@ INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- (0 rows) -- multi shard cascading delete @@ -196,8 +196,8 @@ INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); DELETE FROM referenced_table; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) -- multi shard cascading delete with alter table @@ -213,35 +213,35 @@ DROP TABLE 
referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350225" violates foreign key constraint "referencing_table_ref_id_fkey_1350257" on table "referencing_table_1350257" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350257". -CONTEXT: while executing command on localhost:57638 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350257" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -250,36 +250,36 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350289" violates foreign key constraint "referencing_table_ref_id_fkey_1350321" on table "referencing_table_1350321" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350321". -CONTEXT: while executing command on localhost:57638 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350321" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -289,36 +289,36 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); UPDATE referenced_table SET test_column = 10 WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350353" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_1350385" -DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350385". -CONTEXT: while executing command on localhost:57638 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_xxxxxxx" +DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -328,36 +328,36 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350417" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_1350449" -DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350449". -CONTEXT: while executing command on localhost:57638 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_xxxxxxx" +DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -367,21 +367,21 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- | 2 (1 row) @@ -392,24 +392,24 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO referencing_table VALUES(null, 2); -ERROR: insert or update on table "referencing_table_1350600" violates foreign key constraint "referencing_table_ref_id_fkey_1350600" +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350600" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -419,16 +419,16 @@ DROP TABLE referenced_table; SET citus.shard_count TO 4; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify that we skip foreign key validation when propagation is turned off @@ -472,16 +472,16 @@ DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); @@ -493,15 +493,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- columns for the referenced table is empty @@ -521,27 +521,27 @@ ERROR: number of referencing and referenced columns for foreign key disagree -- test foreign constraint creation while existing tables does not satisfy the constraint INSERT INTO referencing_table VALUES(1, 1); ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -ERROR: insert or update on table "referencing_table_1350628" violates foreign key constraint "test_constraint_1350628" -DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350624". -CONTEXT: while executing command on localhost:57637 +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx -- test foreign constraint with correct conditions DELETE FROM referencing_table WHERE ref_id = 1; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); -ERROR: insert or update on table "referencing_table_1350628" violates foreign key constraint "test_constraint_1350628" -DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350624". -CONTEXT: while executing command on localhost:57637 +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" +DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; @@ -554,13 +554,13 @@ INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; @@ -569,21 +569,21 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; @@ -593,21 +593,21 @@ INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628" -DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" +DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -615,22 +615,22 @@ ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON UPDATE NO ACTION + DEFERABLE + INITIALLY DEFERRED ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; UPDATE referenced_table SET test_column = 10 WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628" -DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350628". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" +DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -639,21 +639,21 @@ ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT; BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; -ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628" -DETAIL: Key (id, test_column)=(1, 10) is still referenced from table "referencing_table_1350628". -CONTEXT: while executing command on localhost:57637 +ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" +DETAIL: Key (id, test_column)=(1, 10) is still referenced from table "referencing_table_xxxxxxx". +CONTEXT: while executing command on localhost:xxxxx UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column -----+------------- + id | test_column +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -662,8 +662,8 @@ ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE; INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table ORDER BY 1,2; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 10 | 1 | 2 (2 rows) @@ -673,12 +673,12 @@ ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test MATCH FULL ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL; INSERT INTO referencing_table VALUES(null, 2); -ERROR: insert or update on table "referencing_table_1350631" violates foreign key constraint "test_constraint_1350631" +ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350631" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 10 | 1 (1 row) @@ -690,15 +690,15 @@ DROP TABLE referenced_table; CREATE TABLE cyclic_reference_table1(id int, table2_id int, PRIMARY KEY(id, table2_id)); CREATE TABLE cyclic_reference_table2(id int, table1_id int, PRIMARY KEY(id, table1_id)); SELECT create_distributed_table('cyclic_reference_table1', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('cyclic_reference_table2', 'table1_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE cyclic_reference_table1 ADD CONSTRAINT cyclic_constraint1 FOREIGN KEY(id, table2_id) REFERENCES cyclic_reference_table2(table1_id, id) DEFERRABLE INITIALLY DEFERRED; @@ -707,7 +707,7 @@ ALTER TABLE cyclic_reference_table2 ADD CONSTRAINT cyclic_constraint2 FOREIGN KE INSERT INTO cyclic_reference_table1 VALUES(1, 1); ERROR: insert or update on table "cyclic_reference_table1_1350632" violates foreign key constraint "cyclic_constraint1_1350632" DETAIL: Key (id, table2_id)=(1, 1) is not present in table "cyclic_reference_table2_1350636". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- proper insertion to table with cyclic dependency BEGIN; INSERT INTO cyclic_reference_table1 VALUES(1, 1); @@ -715,14 +715,14 @@ INSERT INTO cyclic_reference_table2 VALUES(1, 1); COMMIT; -- verify that rows are actually inserted SELECT * FROM cyclic_reference_table1; - id | table2_id -----+----------- + id | table2_id +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM cyclic_reference_table2; - id | table1_id -----+----------- + id | table1_id +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -749,20 +749,20 @@ COMMIT; -- test insertion to referencing table, we expect that to fail INSERT INTO transaction_referencing_table VALUES(1, 1); ERROR: insert or update on table "transaction_referencing_table" violates foreign key constraint "transaction_fk_constraint" -DETAIL: Key (ref_id)=(1) is not present in table "transaction_referenced_table". +DETAIL: Key (ref_id)=(X) is not present in table "transaction_referenced_table". 
-- proper insertion to both referenced and referencing tables INSERT INTO transaction_referenced_table VALUES(1); INSERT INTO transaction_referencing_table VALUES(1, 1); -- verify that rows are actually inserted SELECT * FROM transaction_referenced_table; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM transaction_referencing_table; - id | ref_id -----+-------- + id | ref_id +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -778,9 +778,9 @@ CREATE TABLE self_referencing_table1( FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table1(id, other_column) ); SELECT create_distributed_table('self_referencing_table1', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test insertion to self referencing table @@ -789,11 +789,11 @@ INSERT INTO self_referencing_table1 VALUES(1, 1, 1); INSERT INTO self_referencing_table1 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_fkey_1350640" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350640". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; - id | other_column | other_column_ref -----+--------------+------------------ + id | other_column | other_column_ref +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -802,9 +802,9 @@ DROP TABLE self_referencing_table1; -- test self referencing foreign key with ALTER TABLE CREATE TABLE self_referencing_table2(id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column)); SELECT create_distributed_table('self_referencing_table2', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE self_referencing_table2 ADD CONSTRAINT self_referencing_fk_constraint FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table2(id, other_column); @@ -814,11 +814,11 @@ INSERT INTO self_referencing_table2 VALUES(1, 1, 1); INSERT INTO self_referencing_table2 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table2_1350644" violates foreign key constraint "self_referencing_fk_constraint_1350644" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table2_1350644". 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; - id | other_column | other_column_ref -----+--------------+------------------ + id | other_column | other_column_ref +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -828,9 +828,9 @@ DROP TABLE self_referencing_table2; -- test foreign key creation on CREATE TABLE from reference table CREATE TABLE referenced_by_reference_table(id int PRIMARY KEY, other_column int); SELECT create_distributed_table('referenced_by_reference_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_by_reference_table(id)); @@ -841,16 +841,16 @@ DETAIL: A reference table can only have reference keys to other reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table_second(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_reference_table('reference_table_second'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- test foreign key creation on CREATE TABLE from reference table to local table @@ -870,18 +870,18 @@ CREATE TABLE self_referencing_reference_table( FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- test foreign key creation on ALTER TABLE from reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_by_reference_table(id); @@ -890,9 +890,9 @@ DETAIL: A reference table can only have reference keys to other reference table -- test foreign key creation on ALTER TABLE to reference table CREATE TABLE references_to_reference_table(id int, referencing_column int); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); @@ -900,9 +900,9 @@ ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referenc DROP TABLE reference_table_second; CREATE TABLE reference_table_second(id int, referencing_column int); SELECT create_reference_table('reference_table_second'); - create_reference_table 
------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE reference_table_second ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); @@ -913,9 +913,9 @@ DETAIL: drop cascades to constraint fk on table references_to_reference_table drop cascades to constraint fk on table reference_table_second CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id); @@ -930,9 +930,9 @@ CREATE TABLE self_referencing_reference_table( PRIMARY KEY(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column); diff --git a/src/test/regress/expected/multi_foreign_key_relation_graph.out b/src/test/regress/expected/multi_foreign_key_relation_graph.out index fa7b48e3c..8345f0cfe 100644 --- a/src/test/regress/expected/multi_foreign_key_relation_graph.out +++ b/src/test/regress/expected/multi_foreign_key_relation_graph.out @@ -13,152 +13,152 @@ CREATE FUNCTION get_referenced_relation_id_list(Oid) -- Simple case with distributed tables CREATE TABLE dtt1(id int PRIMARY KEY); SELECT create_distributed_table('dtt1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE dtt2(id int PRIMARY KEY REFERENCES dtt1(id)); SELECT create_distributed_table('dtt2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE dtt3(id int PRIMARY KEY REFERENCES dtt2(id)); SELECT create_distributed_table('dtt3','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; - 
get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- dtt2 dtt3 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- dtt3 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) CREATE TABLE dtt4(id int PRIMARY KEY); SELECT create_distributed_table('dtt4', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) ALTER TABLE dtt4 ADD CONSTRAINT dtt4_fkey FOREIGN KEY (id) REFERENCES dtt3(id); SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 dtt2 dtt3 (3 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list 
+--------------------------------------------------------------------- dtt2 dtt3 dtt4 (3 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- dtt3 dtt4 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- dtt4 (1 row) ALTER TABLE dtt4 DROP CONSTRAINT dtt4_fkey; SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) -- some tests within transction blocks to make sure that @@ -169,33 +169,33 @@ CREATE TABLE test_3 (id int UNIQUE); CREATE TABLE test_4 (id int UNIQUE); CREATE TABLE test_5 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_Table('test_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_Table('test_3', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_Table('test_4', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_Table('test_5', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE VIEW referential_integrity_summary AS @@ -218,42 +218,42 @@ CREATE VIEW referential_integrity_summary AS BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations 
----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | | - 4 | test_4 | | - 5 | test_5 | | + 3 | test_3 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2,test_3} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3} | 2 | test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} - 4 | test_4 | | - 5 | test_5 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+------------------------+------------------------ - 1 | test_1 | {test_2,test_3,test_4} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} 4 | test_4 | | {test_3,test_2,test_1} - 5 | test_5 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -265,42 +265,42 @@ ROLLBACK; BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | | - 4 | test_4 | | - 5 | test_5 | | + 3 | test_3 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4} | + 3 | test_3 | {test_4} | 4 | test_4 | | {test_3} - 5 | test_5 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM 
referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4,test_5} | + 3 | test_3 | {test_4,test_5} | 4 | test_4 | {test_5} | {test_3} 5 | test_5 | | {test_4,test_3} (5 rows) ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -315,9 +315,9 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -326,11 +326,11 @@ BEGIN; ALTER TABLE test_3 DROP CONSTRAINT fkey_1; SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4,test_5} | + 3 | test_3 | {test_4,test_5} | 4 | test_4 | {test_5} | {test_3} 5 | test_5 | | {test_4,test_3} (5 rows) @@ -341,51 +341,51 @@ DROP TABLE test_1, test_2, test_3, test_4, test_5 CASCADE; BEGIN; CREATE TABLE test_1 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_2 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_1(id)); SELECT create_distributed_Table('test_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) CREATE TABLE test_3 (id int UNIQUE, 
FOREIGN KEY(id) REFERENCES test_2(id)); SELECT create_distributed_Table('test_3', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2,test_3} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3} | 2 | test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} (3 rows) CREATE TABLE test_4 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_3(id)); SELECT create_distributed_Table('test_4', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+------------------------+------------------------ - 1 | test_1 | {test_2,test_3,test_4} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} 4 | test_4 | | {test_3,test_2,test_1} @@ -393,15 +393,15 @@ BEGIN; CREATE TABLE test_5 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_4(id)); SELECT create_distributed_Table('test_5', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -417,9 +417,9 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + n | table_name | referencing_relations | referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -431,9 +431,9 @@ NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to constraint test_4_id_fkey on table test_4 drop cascades to constraint fkey_1 on table test_4 SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- - 1 | test_1 | {test_2} | + n | table_name | referencing_relations | 
referenced_relations +--------------------------------------------------------------------- + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) @@ -445,57 +445,57 @@ BEGIN; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- test_6 test_7 (2 rows) @@ -505,23 +505,23 @@ NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table test_7 drop cascades to constraint test_8_id_fkey on table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list 
+--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -531,57 +531,57 @@ BEGIN; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list 
---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- test_6 test_7 (2 rows) @@ -592,13 +592,13 @@ DETAIL: drop cascades to table test_6 drop cascades to constraint test_7_id_fkey on table test_7 drop cascades to table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list ----------------------------------- + get_referencing_relation_id_list +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list ---------------------------------- + get_referenced_relation_id_list +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/multi_function_evaluation.out b/src/test/regress/expected/multi_function_evaluation.out index 52bedb6fc..5c656f331 100644 --- a/src/test/regress/expected/multi_function_evaluation.out +++ b/src/test/regress/expected/multi_function_evaluation.out @@ -3,28 +3,28 @@ -- SET citus.next_shard_id TO 1200000; -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. 
+-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL) CREATE TABLE example (key INT, value INT); SELECT master_create_distributed_table('example', 'key', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE SEQUENCE example_value_seq; SELECT master_create_worker_shards('example', 1, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) INSERT INTO example VALUES (1, nextval('example_value_seq')); SELECT * FROM example; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -33,11 +33,11 @@ PREPARE stmt AS INSERT INTO example VALUES (2); EXECUTE stmt; EXECUTE stmt; SELECT * FROM example; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 - 2 | - 2 | + 2 | + 2 | (3 rows) -- non-immutable functions inside CASE/COALESCE aren't allowed @@ -106,15 +106,15 @@ ALTER TABLE example ADD value timestamptz; INSERT INTO example VALUES (3, now()); UPDATE example SET value = timestamp '10-10-2000 00:00' WHERE key = 3 AND value > now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; - key | value ------+------------------------------ + key | value +--------------------------------------------------------------------- 3 | Tue Oct 10 00:00:00 2000 PDT (1 row) DELETE FROM example WHERE key = 3 AND value < now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- test that function evaluation descends into expressions @@ -133,8 +133,8 @@ CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE NOTICE: stable_fn called CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE SELECT * FROM example WHERE key = 44; - key | value ------+------------------------------ + key | value +--------------------------------------------------------------------- 44 | Tue Oct 10 00:00:00 2000 PDT (1 row) diff --git a/src/test/regress/expected/multi_function_in_join.out b/src/test/regress/expected/multi_function_in_join.out index 886fe70c5..ac0ecd748 100644 --- a/src/test/regress/expected/multi_function_in_join.out +++ b/src/test/regress/expected/multi_function_in_join.out @@ -14,9 +14,9 @@ SET search_path TO 'functions_in_joins'; SET citus.next_shard_id TO 2500000; CREATE TABLE table1 (id int, data int); SELECT create_distributed_table('table1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO table1 @@ -27,10 +27,10 @@ SET client_min_messages TO DEBUG1; -- Check joins on a sequence CREATE SEQUENCE numbers; SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC; -DEBUG: generating subplan 2_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) -DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) 
intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) ORDER BY table1.id - id | data | n -----+------+--- +DEBUG: generating subplan XXX_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) ORDER BY table1.id + id | data | n +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -39,10 +39,10 @@ CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; -DEBUG: generating subplan 3_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id - id | data | sum -----+------+----- +DEBUG: generating subplan XXX_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id + id | data | sum +--------------------------------------------------------------------- 8 | 64 | 8 (1 row) @@ -54,10 +54,10 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; -DEBUG: generating subplan 4_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id - id | data | val -----+------+----- +DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id + id | data | val +--------------------------------------------------------------------- 3 | 9 | 3 (1 row) @@ -73,10 +73,10 @@ $$ LANGUAGE plpgsql; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; -DEBUG: generating subplan 5_1 for subquery SELECT result FROM functions_in_joins.next_k_integers(3, 2) next_integers(result) -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_integers.result FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.result FROM 
read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(result integer)) next_integers ON ((table1.id OPERATOR(pg_catalog.=) next_integers.result))) ORDER BY table1.id - id | data | result -----+------+-------- +DEBUG: generating subplan XXX_1 for subquery SELECT result FROM functions_in_joins.next_k_integers(3, 2) next_integers(result) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_integers.result FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.result FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(result integer)) next_integers ON ((table1.id OPERATOR(pg_catalog.=) next_integers.result))) ORDER BY table1.id + id | data | result +--------------------------------------------------------------------- 3 | 9 | 3 4 | 16 | 4 (2 rows) @@ -87,10 +87,10 @@ SELECT x, x+1 FROM generate_series(0,4) f(x) $cmd$ LANGUAGE SQL; SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; -DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id - id | data | x | y -----+------+---+--- +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id + id | data | x | y +--------------------------------------------------------------------- 1 | 1 | 1 | 2 2 | 4 | 2 | 3 3 | 9 | 3 | 4 @@ -102,10 +102,10 @@ CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); -DEBUG: generating subplan 7_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) - f1 | f2 -----+------------ +DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) + f1 | f2 +--------------------------------------------------------------------- 32 | 32 is text (1 row) @@ -113,17 +113,17 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM CREATE OR REPLACE 
FUNCTION the_minimum_id() RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL; SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); -DEBUG: generating subplan 8_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) - id | data | min_id -----+------+-------- +DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) + id | data | min_id +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) -- a built-in immutable function SELECT * FROM table1 JOIN abs(100) as hundred ON (id = hundred) ORDER BY id ASC; - id | data | hundred ------+-------+--------- + id | data | hundred +--------------------------------------------------------------------- 100 | 10000 | 100 (1 row) @@ -135,12 +135,12 @@ SELECT * FROM table1, next_row_to_process WHERE table1.data <= next_row_to_process.data ORDER BY 1,2 ASC; -DEBUG: generating subplan 11_1 for CTE next_row_to_process: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN nextval('functions_in_joins.numbers'::regclass) n(n) ON ((table1.id OPERATOR(pg_catalog.=) n.n))) -DEBUG: generating subplan 12_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_row_to_process.id, next_row_to_process.data, next_row_to_process.n FROM functions_in_joins.table1, (SELECT intermediate_result.id, intermediate_result.data, intermediate_result.n FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, data integer, n bigint)) next_row_to_process WHERE (table1.data OPERATOR(pg_catalog.<=) next_row_to_process.data) ORDER BY table1.id, table1.data - id | data | id | data | n -----+------+----+------+--- +DEBUG: generating subplan XXX_1 for CTE next_row_to_process: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN nextval('functions_in_joins.numbers'::regclass) n(n) ON ((table1.id OPERATOR(pg_catalog.=) n.n))) +DEBUG: generating subplan XXX_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) +DEBUG: 
Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_row_to_process.id, next_row_to_process.data, next_row_to_process.n FROM functions_in_joins.table1, (SELECT intermediate_result.id, intermediate_result.data, intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, data integer, n bigint)) next_row_to_process WHERE (table1.data OPERATOR(pg_catalog.<=) next_row_to_process.data) ORDER BY table1.id, table1.data + id | data | id | data | n +--------------------------------------------------------------------- 1 | 1 | 2 | 4 | 2 2 | 4 | 2 | 4 | 2 (2 rows) @@ -148,10 +148,10 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT table1.id, tab -- Multiple functions in an RTE SELECT * FROM ROWS FROM (next_k_integers(5), next_k_integers(10)) AS f(a, b), table1 WHERE id = a ORDER BY id ASC; -DEBUG: generating subplan 13_1 for subquery SELECT a, b FROM ROWS FROM(functions_in_joins.next_k_integers(5), functions_in_joins.next_k_integers(10)) f(a, b) -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT f.a, f.b, table1.id, table1.data FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) f(a, b), functions_in_joins.table1 WHERE (table1.id OPERATOR(pg_catalog.=) f.a) ORDER BY table1.id - a | b | id | data ----+----+----+------ +DEBUG: generating subplan XXX_1 for subquery SELECT a, b FROM ROWS FROM(functions_in_joins.next_k_integers(5), functions_in_joins.next_k_integers(10)) f(a, b) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.a, f.b, table1.id, table1.data FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) f(a, b), functions_in_joins.table1 WHERE (table1.id OPERATOR(pg_catalog.=) f.a) ORDER BY table1.id + a | b | id | data +--------------------------------------------------------------------- 5 | 10 | 5 | 25 6 | 11 | 6 | 36 7 | 12 | 7 | 49 @@ -174,17 +174,17 @@ begin end; $$ language plpgsql; SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; -DEBUG: generating subplan 14_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum - id | data | minimum | maximum ------+-------+---------+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum 
OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum + id | data | minimum | maximum +--------------------------------------------------------------------- 1 | 1 | 1 | 10000 100 | 10000 | 1 | 10000 (2 rows) -- The following tests will fail as we do not support all joins on -- all kinds of functions --- In other words, we cannot recursively plan the functions and hence +-- In other words, we cannot recursively plan the functions and hence -- the query fails on the workers SET client_min_messages TO ERROR; \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index aa949d167..aeabd222b 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -9,8 +9,8 @@ CREATE TABLE simple_table ( id bigint ); SELECT master_get_table_ddl_events('simple_table'); - master_get_table_ddl_events -------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.simple_table (first_name text, last_name text, id bigint) ALTER TABLE public.simple_table OWNER TO postgres (2 rows) @@ -21,8 +21,8 @@ CREATE TABLE not_null_table ( id bigint not null ); SELECT master_get_table_ddl_events('not_null_table'); - master_get_table_ddl_events --------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.not_null_table (city text, id bigint NOT NULL) ALTER TABLE public.not_null_table OWNER TO postgres (2 rows) @@ -34,8 +34,8 @@ CREATE TABLE column_constraint_table ( age int CONSTRAINT non_negative_age CHECK (age >= 0) ); SELECT master_get_table_ddl_events('column_constraint_table'); - master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0)) ALTER TABLE public.column_constraint_table OWNER TO postgres (2 rows) @@ -48,8 +48,8 @@ CREATE TABLE table_constraint_table ( CONSTRAINT bids_ordered CHECK (min_bid > max_bid) ); SELECT master_get_table_ddl_events('table_constraint_table'); - master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid)) ALTER TABLE public.table_constraint_table OWNER TO postgres (2 rows) @@ -60,8 +60,8 @@ CREATE TABLE default_value_table ( price decimal default 0.00 ); SELECT master_get_table_ddl_events('default_value_table'); - master_get_table_ddl_events ---------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.default_value_table (name text, price 
numeric DEFAULT 0.00) ALTER TABLE public.default_value_table OWNER TO postgres (2 rows) @@ -73,8 +73,8 @@ CREATE TABLE pkey_table ( id bigint PRIMARY KEY ); SELECT master_get_table_ddl_events('pkey_table'); - master_get_table_ddl_events --------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL) ALTER TABLE public.pkey_table OWNER TO postgres ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id) @@ -86,8 +86,8 @@ CREATE TABLE unique_table ( username text UNIQUE not null ); SELECT master_get_table_ddl_events('unique_table'); - master_get_table_ddl_events --------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL) ALTER TABLE public.unique_table OWNER TO postgres ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username) @@ -101,11 +101,11 @@ CREATE TABLE clustered_table ( CREATE INDEX clustered_time_idx ON clustered_table (received_at); CLUSTER clustered_table USING clustered_time_idx; SELECT master_get_table_ddl_events('clustered_table'); - master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL) ALTER TABLE public.clustered_table OWNER TO postgres - CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default + CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx (4 rows) @@ -123,8 +123,8 @@ ALTER TABLE fiddly_table ALTER traceroute SET STORAGE EXTERNAL, ALTER ip_addr SET STATISTICS 500; SELECT master_get_table_ddl_events('fiddly_table'); - master_get_table_ddl_events --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL) ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL ALTER TABLE public.fiddly_table OWNER TO postgres @@ -137,9 +137,9 @@ CREATE FOREIGN TABLE foreign_table ( ) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true'); SELECT create_distributed_table('foreign_table', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER FOREIGN TABLE foreign_table 
rename to renamed_foreign_table; @@ -150,8 +150,8 @@ select table_name, column_name, data_type from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; - table_name | column_name | data_type -------------------------------+-------------+----------- + table_name | column_name | data_type +--------------------------------------------------------------------- renamed_foreign_table_610000 | rename_name | character renamed_foreign_table_610001 | rename_name | character renamed_foreign_table_610002 | rename_name | character @@ -161,8 +161,8 @@ order by table_name; \c - - - :master_port SELECT master_get_table_ddl_events('renamed_foreign_table'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - master_get_table_ddl_events --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw CREATE FOREIGN TABLE public.renamed_foreign_table (id bigint NOT NULL, rename_name character(8) DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true') ALTER TABLE public.renamed_foreign_table OWNER TO postgres @@ -180,8 +180,8 @@ select table_name, column_name, data_type from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; - table_name | column_name | data_type -------------+-------------+----------- + table_name | column_name | data_type +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out index 759fe66db..f61625c85 100644 --- a/src/test/regress/expected/multi_hash_pruning.out +++ b/src/test/regress/expected/multi_hash_pruning.out @@ -6,8 +6,8 @@ SET citus.next_shard_id TO 630000; SET citus.shard_count to 4; SET citus.shard_replication_factor to 1; -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; -- Create a table partitioned on integer column and update partition type to -- hash. Then load data into this table and update shard min max values with @@ -24,9 +24,9 @@ CREATE TABLE orders_hash_partitioned ( o_shippriority integer, o_comment varchar(79) ); SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG2; @@ -34,8 +34,8 @@ SET client_min_messages TO DEBUG2; -- immutable functions. 
SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -43,8 +43,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -52,8 +52,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -61,8 +61,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 3 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -70,8 +70,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -80,8 +80,8 @@ SELECT count(*) FROM orders_hash_partitioned DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -89,8 +89,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -98,105 +98,105 @@ DETAIL: distribution column value: 1 SET citus.enable_router_execution TO 'false'; SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Router planner not enabled. 
- count -------- + count +--------------------------------------------------------------------- 0 (1 row) SET citus.enable_router_execution TO DEFAULT; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_clerk = 'aaa'; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa'); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -205,8 +205,8 @@ SELECT count(*) FROM DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -214,100 +214,100 @@ SET client_min_messages TO DEFAULT; -- Check that we support runing for ANY/IN with literal. 
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY ('{1,2,3}'); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (1,2,3); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) -- Check whether we can deal with null arrays SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL) OR TRUE; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL) OR TRUE; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) -- Check whether we support IN/ANY in subquery SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem_hash_part); - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderkey FROM lineitem_hash_part); - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) --- Check whether we support IN/ANY in subquery with append and range distributed table +-- Check whether we support IN/ANY in subquery with append and range distributed table SELECT count(*) FROM lineitem WHERE l_orderkey = ANY ('{1,2,3}'); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey IN (1,2,3); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey = ANY(NULL) OR TRUE; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY ('{1,2,3}'); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey IN (1,2,3); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY(NULL) OR TRUE; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) @@ -317,34 +317,34 @@ SET client_min_messages TO DEBUG2; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey < ALL ('{1,2,3}'); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) --- Check that we don't give a spurious hint message when non-partition +-- Check that we don't give a spurious hint message when non-partition -- columns are used with ANY/IN/ALL SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_totalprice IN (2, 5); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count 
+--------------------------------------------------------------------- 0 (1 row) -- Check that we cannot prune for mutable functions. SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random(); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() OR o_orderkey = 1; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -353,8 +353,8 @@ SELECT count(*) FROM orders_hash_partitioned DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -375,8 +375,8 @@ DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -388,8 +388,8 @@ SELECT count(*) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out index 9b072a0a3..7c15c3976 100644 --- a/src/test/regress/expected/multi_having_pushdown.out +++ b/src/test/regress/expected/multi_having_pushdown.out @@ -4,16 +4,16 @@ SET citus.next_shard_id TO 590000; CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE orders_hash (LIKE orders); SELECT create_distributed_table('orders_hash', 'o_orderkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- push down when table is distributed by hash and grouped by partition column @@ -22,8 +22,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC, remote_scan.l_orderkey @@ -31,7 +31,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (sum((l_extendedprice * l_discount))) DESC, l_orderkey @@ -47,8 +47,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN ------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Limit -> 
Sort Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.l_orderkey @@ -59,7 +59,7 @@ EXPLAIN (COSTS FALSE) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_orderkey -> Seq Scan on lineitem_290000 lineitem @@ -71,8 +71,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.l_shipmode @@ -83,7 +83,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_shipmode -> Seq Scan on lineitem_hash_590000 lineitem_hash @@ -95,8 +95,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_shipmode, l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 3 DESC, 1, 2 LIMIT 3; - QUERY PLAN --------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC, remote_scan.l_shipmode, remote_scan.l_orderkey @@ -104,7 +104,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (sum((l_extendedprice * l_discount))) DESC, l_shipmode, l_orderkey @@ -121,8 +121,8 @@ EXPLAIN (COSTS FALSE) WHERE o_orderkey = l_orderkey GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC @@ -130,7 +130,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC @@ -150,8 +150,8 @@ EXPLAIN (COSTS FALSE) WHERE o_orderkey = l_orderkey GROUP BY l_shipmode, o_clerk HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; - QUERY PLAN ------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: (sum(remote_scan.revenue)) DESC @@ -162,7 +162,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: lineitem_hash.l_shipmode, orders_hash.o_clerk -> Hash Join @@ -179,8 +179,8 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 ORDER BY 1; - max ------ + max +--------------------------------------------------------------------- 4 5 5 @@ -191,8 +191,8 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 OR count(*) > 10 ORDER BY 1; - max ------ + max 
+--------------------------------------------------------------------- 4 5 5 @@ -204,8 +204,8 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 AND count(*) > 20 ORDER BY 1; - max ------ + max +--------------------------------------------------------------------- 5 5 (2 rows) @@ -214,8 +214,8 @@ SELECT max(value_1) FROM users_table GROUP BY user_id HAVING max(value_2) > 0 AND count(*) FILTER (WHERE value_3=2) > 3 AND min(value_2) IN (0,1,2,3); - max ------ + max +--------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 1c4e5b7d2..7d3515336 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -9,20 +9,20 @@ SET citus.next_shard_id TO 102080; CREATE TABLE index_test_range(a int, b int, c int); SELECT create_distributed_table('index_test_range', 'a', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 102080 (1 row) SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 102081 (1 row) @@ -30,27 +30,27 @@ SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 2; CREATE TABLE index_test_hash(a int, b int, c int); SELECT create_distributed_table('index_test_hash', 'a', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE index_test_append(a int, b int, c int); SELECT create_distributed_table('index_test_append', 'a', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 102090 (1 row) SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 102091 (1 row) @@ -99,8 +99,8 @@ CLUSTER local_table USING local_table_index; DROP TABLE local_table; -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash 
USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) @@ -121,26 +121,26 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t \c - - - :worker_1_port SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); - count -------- + count +--------------------------------------------------------------------- 9 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; - count -------- + count +--------------------------------------------------------------------- 32 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%'; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -177,8 +177,8 @@ CREATE INDEX ON lineitem (l_orderkey); ERROR: creating index without a name on a distributed table is currently unsupported -- Verify that none of failed indexes got created on the master node SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) @@ -237,25 +237,25 @@ DROP INDEX CONCURRENTLY lineitem_concurrently_index; -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, so exclude those from this check. 
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid -----------+------------ + indrelid | indexrelid +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+-----------------+-----------------------------+------------+---------------------------------------------------------------------------------------------------------- + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) (1 row) \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid -----------+------------ + indrelid | indexrelid +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+------------------------+------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------ + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- public | index_test_hash_102082 | index_test_hash_index_a_b_c_102082 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102082 ON public.index_test_hash_102082 USING btree (a) INCLUDE (b, c) public | index_test_hash_102083 | index_test_hash_index_a_b_c_102083 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102083 ON public.index_test_hash_102083 USING btree (a) INCLUDE (b, c) public | index_test_hash_102084 | index_test_hash_index_a_b_c_102084 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102084 ON public.index_test_hash_102084 USING btree (a) INCLUDE (b, c) @@ -276,8 +276,8 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? --------------- + Index Valid? +--------------------------------------------------------------------- f (1 row) @@ -285,8 +285,8 @@ SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx':: DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? --------------- + Index Valid? 
+--------------------------------------------------------------------- t (1 row) @@ -300,8 +300,8 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? --------------- + Index Valid? +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index 9fc6a2c21..46053ab9d 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -11,38 +11,38 @@ SET citus.shard_replication_factor = 2; -- so be less verbose with \set VERBOSITY TERSE when necessary CREATE TABLE raw_events_first (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_first', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE raw_events_second (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_second', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE agg_events (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp, UNIQUE(user_id, value_1_agg)); SELECT create_distributed_table('agg_events', 'user_id');; - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create the reference table as well CREATE TABLE reference_table (user_id int); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE insert_select_varchar_test (key varchar, value int); SELECT create_distributed_table('insert_select_varchar_test', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- set back to the defaults @@ -78,8 +78,8 @@ WHERE raw_events_first.user_id = raw_events_second.user_id ORDER BY user_id DESC; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -91,7 +91,7 @@ ORDER BY -- see that we get unique vialitons \set VERBOSITY TERSE INSERT INTO raw_events_second SELECT * FROM raw_events_first; -ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300004" +ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_xxxxxxx" \set VERBOSITY DEFAULT -- stable functions should be allowed INSERT INTO raw_events_second (user_id, time) @@ -218,9 +218,9 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS 
citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300002 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 1073741823))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 9 | | 90 | | 9000 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 9 | | 90 | | 9000 | (1 row) -- hits two shards @@ -238,7 +238,7 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE (((user_id OPERATOR(pg_catalog.=) 9) OR (user_id OPERATOR(pg_catalog.=) 16)) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable -ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300007" +ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_xxxxxxx" -- now do some aggregations INSERT INTO agg_events SELECT @@ -266,7 +266,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 1073741823)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND 
(worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: Plan is router executable -ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" +ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx" -- some subquery tests INSERT INTO agg_events (value_1_agg, @@ -285,7 +285,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE ((worker_hash(id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(id) OPERATOR(pg_catalog.<=) 1073741823)) GROUP BY id ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE ((worker_hash(id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(id) OPERATOR(pg_catalog.<=) 2147483647)) GROUP BY id ORDER BY id DEBUG: Plan is router executable -ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" +ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx" -- subquery one more level depth INSERT INTO agg_events (value_4_agg, @@ -307,7 +307,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(id) OPERATOR(pg_catalog.<=) 1073741823)) ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(id) OPERATOR(pg_catalog.<=) 2147483647)) ORDER BY id DEBUG: Plan is router executable -ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" +ERROR: 
duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx" \set VERBOSITY DEFAULT -- join between subqueries INSERT INTO agg_events @@ -491,9 +491,9 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (use DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 1073741823)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: Plan is router executable - user_id | value_1_agg ----------+------------- - 7 | + user_id | value_1_agg +--------------------------------------------------------------------- + 7 | (1 row) INSERT INTO agg_events (user_id, value_1_agg) @@ -554,10 +554,10 @@ SELECT t1.user_id AS col1, ON t1.user_id = t2.user_id ORDER BY t1.user_id, t2.user_id; - col1 | col2 -------+------ + col1 | col2 +--------------------------------------------------------------------- 1 | 1 - 2 | + 2 | 3 | 3 4 | 4 5 | 5 @@ -590,10 +590,10 @@ FROM agg_events ORDER BY user_id, value_1_agg; - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- 1 | 1 - 2 | + 2 | 3 | 3 4 | 4 5 | 5 @@ -635,15 +635,15 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Router planner cannot handle multi-shard select queries SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- 1 | 10 2 | 20 3 | 30 4 | 40 5 | 50 6 | 60 - 7 | + 7 | 8 | 80 9 | 90 (9 rows) @@ -666,15 +666,15 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t DEBUG: Plan is router executable SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- 1 | 10 2 | 20 3 | 30 4 | 40 5 | 50 6 | 60 - 7 | + 7 | 8 | 80 9 | 90 (9 rows) @@ -692,9 +692,9 @@ INSERT INTO agg_events DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 51_1 for CTE fist_table_agg: SELECT (max(value_1) OPERATOR(pg_catalog.+) 1) AS v1_agg, user_id FROM public.raw_events_first GROUP BY user_id +DEBUG: generating subplan XXX_1 for CTE fist_table_agg: SELECT (max(value_1) OPERATOR(pg_catalog.+) 1) AS v1_agg, user_id FROM public.raw_events_first GROUP BY user_id DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: Plan 51 query after replacing subqueries and CTEs: SELECT user_id, v1_agg FROM (SELECT fist_table_agg.v1_agg, fist_table_agg.user_id FROM (SELECT intermediate_result.v1_agg, intermediate_result.user_id FROM read_intermediate_result('51_1'::text, 'binary'::citus_copy_format) intermediate_result(v1_agg integer, user_id integer)) fist_table_agg) citus_insert_select_subquery +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, v1_agg FROM (SELECT fist_table_agg.v1_agg, fist_table_agg.user_id FROM (SELECT intermediate_result.v1_agg, intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v1_agg integer, user_id integer)) fist_table_agg) citus_insert_select_subquery DEBUG: Creating router plan DEBUG: Plan is router executable ROLLBACK; @@ -708,8 +708,8 @@ INSERT INTO agg_events DEBUG: Subqueries without relations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 54_1 for CTE sub_cte: SELECT 1 -DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT user_id, (SELECT sub_cte."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) sub_cte) FROM public.raw_events_first +DEBUG: generating subplan XXX_1 for CTE sub_cte: SELECT 1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, (SELECT sub_cte."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) sub_cte) FROM public.raw_events_first DEBUG: Router planner cannot handle multi-shard select queries ERROR: could not run distributed query with subquery outside the FROM, WHERE and HAVING clauses HINT: Consider using an equality filter on the distributed table's partition column. @@ -737,13 +737,13 @@ DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 58_1 for subquery SELECT user_id FROM public.raw_events_first +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.raw_events_first DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 58_2 for subquery SELECT user_id FROM public.raw_events_first +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.raw_events_first DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 58_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('58_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('58_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 58 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('58_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) citus_insert_select_subquery DEBUG: Creating router plan DEBUG: Plan is router executable ROLLBACK; @@ -1078,8 +1078,8 @@ DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] -DEBUG: generating subplan 88_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.value_3 AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_3 -DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT id, v1, v4 FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 bigint, id double precision)) foo +DEBUG: generating subplan XXX_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.value_3 AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, v1, v4 FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 bigint, id double precision)) foo DEBUG: Creating router 
plan DEBUG: Plan is router executable ERROR: the partition column of table public.agg_events cannot be NULL @@ -1219,8 +1219,8 @@ DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] -DEBUG: generating subplan 107_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric) -DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first, public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first, public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) DEBUG: Router planner cannot handle multi-shard select queries -- the second part of the query is not routable since -- GROUP BY not on the partition column (i.e., value_1) and thus join @@ -1264,8 +1264,8 @@ DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] -DEBUG: generating subplan 110_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric) -DEBUG: Plan 110 query after replacing subqueries and CTEs: SELECT f.id FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM public.raw_events_first, 
public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('110_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.id FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM public.raw_events_first, public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) DEBUG: Router planner cannot handle multi-shard select queries -- cannot pushdown the query since the JOIN is not equi JOIN INSERT INTO agg_events @@ -1747,8 +1747,8 @@ BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 101; SELECT user_id FROM raw_events_first WHERE user_id = 101; - user_id ---------- + user_id +--------------------------------------------------------------------- 101 (1 row) @@ -1772,8 +1772,8 @@ CREATE VIEW test_view AS SELECT * FROM raw_events_first; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (16, now(), 60, 600, 6000.1, 60000); SELECT count(*) FROM raw_events_second; - count -------- + count +--------------------------------------------------------------------- 36 (1 row) @@ -1782,8 +1782,8 @@ INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) (17, now(), 60, 600, 6000.1, 60000); INSERT INTO raw_events_second SELECT * FROM test_view WHERE user_id = 17 GROUP BY 1,2,3,4,5,6; SELECT count(*) FROM raw_events_second; - count -------- + count +--------------------------------------------------------------------- 38 (1 row) @@ -1801,8 +1801,8 @@ inserts AS ( NULL ) SELECT count(*) FROM inserts; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -1834,11 +1834,11 @@ SET client_min_messages TO DEBUG2; -- this should fail INSERT INTO raw_events_first SELECT * FROM raw_events_second; ERROR: cannot perform distributed planning for the given modification -DETAIL: Insert query cannot be executed on all placements for shard 13300000 +DETAIL: Insert query cannot be executed on all placements for shard xxxxx -- this should also fail INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; ERROR: cannot perform distributed planning for the given modification -DETAIL: Insert query cannot be executed on all placements for shard 13300000 +DETAIL: Insert query cannot be executed on all placements for shard xxxxx -- but this should work given that it hits 
different shard INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 6; DEBUG: Skipping target shard interval 13300000 since SELECT query for it pruned away @@ -1890,8 +1890,8 @@ FROM (SELECT f1.key WHERE f1.key = f2.key GROUP BY 1) AS foo; SELECT * FROM insert_select_varchar_test ORDER BY 1 DESC, 2 DESC; - key | value ---------+------- + key | value +--------------------------------------------------------------------- test_2 | 100 test_2 | 30 test_1 | 10 @@ -1911,9 +1911,9 @@ CREATE TABLE table_with_defaults -- we don't need many shards SET citus.shard_count = 2; SELECT create_distributed_table('table_with_defaults', 'store_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- let's see the queries @@ -2044,9 +2044,9 @@ CREATE TABLE table_with_serial ( s bigserial ); SELECT create_distributed_table('table_with_serial', 'store_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO table_with_serial (store_id) @@ -2064,21 +2064,21 @@ CREATE TABLE text_table (part_col text, val int); CREATE TABLE char_table (part_col char[], val int); create table table_with_starts_with_defaults (a int DEFAULT 5, b int, c int); SELECT create_distributed_table('text_table', 'part_col'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('char_table','part_col'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('table_with_starts_with_defaults', 'c'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG; @@ -2159,22 +2159,22 @@ CREATE TABLE summary_table count BIGINT ); SELECT create_distributed_table('raw_table', 'time'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('summary_table', 'time'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO raw_table VALUES(1, '11-11-1980'); INSERT INTO summary_table SELECT time, COUNT(*) FROM raw_table GROUP BY time; SELECT * FROM summary_table; - time | count -------------+------- + time | count +--------------------------------------------------------------------- 11-11-1980 | 1 (1 row) @@ -2184,8 +2184,8 @@ TRUNCATE raw_events_first; INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM (VALUES (1,2), (3,4), (5,6)) AS v(int,int); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 2 3 | 4 5 | 6 @@ -2204,8 +2204,8 @@ DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; DEBUG: Router planner cannot handle multi-shard select queries - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2233,13 +2233,13 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS c DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 1 | | 11 | | | - 2 | | 12 | | | - 3 | | 13 | | | - 4 | | 14 | | | - 5 | | 15 | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | | 11 | | | + 2 | | 12 | | | + 3 | | 13 | | | + 4 | | 14 | | | + 5 | | 15 | | | (5 rows) RESET client_min_messages; @@ -2249,8 +2249,8 @@ BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first ORDER BY 1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2265,8 +2265,8 @@ BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first WHERE user_id = 1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -2278,8 +2278,8 @@ SELECT s AS u, 2*s AS v FROM generate_series(1, 5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -2292,8 +2292,8 @@ TRUNCATE raw_events_first; INSERT INTO raw_events_first (value_1, user_id) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | 
value_1 +--------------------------------------------------------------------- 2 | 1 4 | 2 6 | 3 @@ -2308,8 +2308,8 @@ INSERT INTO raw_events_first (value_3, user_id) UNION ALL ( SELECT v, u FROM raw_events_first_local ); SELECT user_id, value_3 FROM raw_events_first ORDER BY user_id, value_3; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 1 | 2 1 | 2 2 | 4 @@ -2330,8 +2330,8 @@ SELECT s, 3*s FROM generate_series (1,5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT user_id, value_4 FROM raw_events_second LIMIT 5; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2348,8 +2348,8 @@ INSERT INTO raw_events_first (user_id, value_1) WITH value AS (SELECT 1) SELECT * FROM removed_rows, value; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -2380,8 +2380,8 @@ WITH ultra_rows AS ( ) SELECT u, v FROM ultra_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -2400,8 +2400,8 @@ WITH super_rows AS ( ) SELECT u, 5 FROM super_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 0 | 5 (1 row) @@ -2413,8 +2413,8 @@ WITH user_two AS ( INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM user_two; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 2 | 6 (1 row) @@ -2430,16 +2430,16 @@ SELECT * FROM numbers; -- Select into distributed table with a sequence CREATE TABLE "CaseSensitiveTable" ("UserID" int, "Value1" int); SELECT create_distributed_table('"CaseSensitiveTable"', 'UserID'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO "CaseSensitiveTable" SELECT s, s FROM generate_series(1,10) s; SELECT * FROM "CaseSensitiveTable" ORDER BY "UserID"; - UserID | Value1 ---------+-------- + UserID | Value1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2456,17 +2456,17 @@ DROP TABLE "CaseSensitiveTable"; -- Select into distributed table with a sequence CREATE TABLE dist_table_with_sequence (user_id serial, value_1 serial); SELECT create_distributed_table('dist_table_with_sequence', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- from local query INSERT INTO dist_table_with_sequence (value_1) SELECT s FROM generate_series(1,5) s; SELECT * FROM dist_table_with_sequence ORDER BY user_id; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2479,8 +2479,8 @@ INSERT INTO 
dist_table_with_sequence (value_1) SELECT value_1 FROM dist_table_with_sequence; ERROR: INSERT ... SELECT cannot generate sequence values when selecting from a distributed table SELECT * FROM dist_table_with_sequence ORDER BY user_id; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2491,52 +2491,52 @@ SELECT * FROM dist_table_with_sequence ORDER BY user_id; -- Select from distributed table into reference table CREATE TABLE ref_table (user_id int, value_1 int); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ref_table SELECT user_id, value_1 FROM raw_events_second; SELECT * FROM ref_table ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- - 1 | - 2 | - 3 | - 4 | - 5 | + user_id | value_1 +--------------------------------------------------------------------- + 1 | + 2 | + 3 | + 4 | + 5 | (5 rows) DROP TABLE ref_table; -- Select from reference table into reference table CREATE TABLE ref1 (d timestamptz); SELECT create_reference_table('ref1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref2 (d date); SELECT create_reference_table('ref2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ref2 VALUES ('2017-10-31'); INSERT INTO ref1 SELECT * FROM ref2; SELECT count(*) from ref1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- also test with now() INSERT INTO ref1 SELECT now() FROM ref2; SELECT count(*) from ref1; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -2545,9 +2545,9 @@ DROP TABLE ref2; -- Select into an append-partitioned table is not supported CREATE TABLE insert_append_table (user_id int, value_4 bigint); SELECT create_distributed_table('insert_append_table', 'user_id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO insert_append_table (user_id, value_4) @@ -2566,8 +2566,8 @@ EXECUTE insert_prep(4); EXECUTE insert_prep(5); EXECUTE insert_prep(6); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 3 2 | 3 3 | 3 @@ -2581,8 +2581,8 @@ TRUNCATE raw_events_first; INSERT INTO test_view SELECT * FROM raw_events_second; SELECT user_id, value_4 FROM test_view ORDER BY user_id, value_4; - user_id | value_4 ----------+--------- + user_id | value_4 +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2595,17 +2595,17 @@ DROP VIEW test_view; -- Make sure we handle dropped columns correctly CREATE TABLE drop_col_table (col1 text, col2 text, col3 text); SELECT create_distributed_table('drop_col_table', 'col2'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE drop_col_table DROP COLUMN 
col1; INSERT INTO drop_col_table (col3, col2) SELECT value_4, user_id FROM raw_events_second LIMIT 5; SELECT * FROM drop_col_table ORDER BY col2, col3; - col2 | col3 -------+------ + col2 | col3 +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2615,8 +2615,8 @@ SELECT * FROM drop_col_table ORDER BY col2, col3; -- make sure the tuple went to the right shard SELECT * FROM drop_col_table WHERE col2 = '1'; - col2 | col3 -------+------ + col2 | col3 +--------------------------------------------------------------------- 1 | 3 (1 row) @@ -2624,16 +2624,16 @@ RESET client_min_messages; -- make sure casts are handled correctly CREATE TABLE coerce_events(user_id int, time timestamp, value_1 numeric); SELECT create_distributed_table('coerce_events', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE coerce_agg (user_id int, value_1_agg int); SELECT create_distributed_table('coerce_agg', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO coerce_events(user_id, value_1) VALUES (1, 1), (2, 2), (10, 10); @@ -2656,8 +2656,8 @@ FROM ( ) AS ftop LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- 10 | 10 10 | 10 2 | 2 @@ -2678,8 +2678,8 @@ FROM ( LIMIT 5; ERROR: value too long for type character(1) SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- (0 rows) TRUNCATE coerce_agg; @@ -2706,10 +2706,10 @@ FROM ( ) AS ftop LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; - user_id | value_1_agg ----------+------------- - 2 | b - 1 | a + user_id | value_1_agg +--------------------------------------------------------------------- + 2 | b + 1 | a (2 rows) TRUNCATE coerce_agg; @@ -2729,8 +2729,8 @@ FROM ( ERROR: new row for relation "coerce_agg_13300060" violates check constraint "small_number_13300060" \set VERBOSITY DEFAULT SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- (0 rows) -- integer[3] -> text[3] @@ -2747,8 +2747,8 @@ FROM ( ) AS ftop LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; - user_id | value_1_agg ----------+------------- + user_id | value_1_agg +--------------------------------------------------------------------- 2 | {2,2,2} 1 | {1,1,1} (2 rows) diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out index 9c174a6fd..e1310c46b 100644 --- a/src/test/regress/expected/multi_insert_select_conflict.out +++ b/src/test/regress/expected/multi_insert_select_conflict.out @@ -4,51 +4,51 @@ SET citus.next_shard_id TO 1900000; SET citus.shard_replication_factor TO 1; CREATE TABLE target_table(col_1 int primary key, col_2 int); SELECT create_distributed_table('target_table','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO 
target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6); CREATE TABLE source_table_1(col_1 int primary key, col_2 int, col_3 int); SELECT create_distributed_table('source_table_1','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5); CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_2','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10); SET client_min_messages to debug1; -- Generate series directly on the coordinator and on conflict do nothing -INSERT INTO target_table (col_1, col_2) -SELECT - s, s -FROM - generate_series(1,10) s +INSERT INTO target_table (col_1, col_2) +SELECT + s, s +FROM + generate_series(1,10) s ON CONFLICT DO NOTHING; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator -- Generate series directly on the coordinator and on conflict update the target table -INSERT INTO target_table (col_1, col_2) -SELECT s, s -FROM - generate_series(1,10) s +INSERT INTO target_table (col_1, col_2) +SELECT s, s +FROM + generate_series(1,10) s ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator -- Since partition columns do not match, pull the data to the coordinator -- and do not change conflicted values INSERT INTO target_table -SELECT - col_2, col_3 +SELECT + col_2, col_3 FROM source_table_1 ON CONFLICT DO NOTHING; @@ -60,19 +60,19 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator -- ordered result. WITH inserted_table AS ( INSERT INTO target_table - SELECT - col_2, col_3 + SELECT + col_2, col_3 FROM source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING * ) SELECT * FROM inserted_table ORDER BY 1; -DEBUG: generating subplan 8_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2 +DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2 DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: The target table's partition column should correspond to a partition column in the subquery. 
-DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 DEBUG: Collecting INSERT ... SELECT results on coordinator - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -82,11 +82,11 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator -- Subquery should be recursively planned due to the limit and do nothing on conflict INSERT INTO target_table -SELECT +SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table_1 LIMIT 5 @@ -95,32 +95,32 @@ ON CONFLICT DO NOTHING; DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: push down of limit count: 5 -DEBUG: generating subplan 12_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo +DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo -- Subquery should be recursively planned due to the limit and update on conflict -- Query is wrapped by CTE to return ordered result. WITH inserted_table AS ( INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table_1 LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING * ) SELECT * FROM inserted_table ORDER BY 1; -DEBUG: generating subplan 14_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2 +DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2 DEBUG: LIMIT clauses are not allowed in distributed INSERT ... 
SELECT queries -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: push down of limit count: 5 -DEBUG: generating subplan 16_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo - col_1 | col_2 --------+------- +DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo + col_1 | col_2 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -131,11 +131,11 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT col_1, col_2 F -- Test with multiple subqueries. Query is wrapped by CTE to return ordered result. WITH inserted_table AS ( INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - (SELECT - col_1, col_2, col_3 + (SELECT + col_1, col_2, col_3 FROM source_table_1 LIMIT 5) @@ -148,18 +148,18 @@ WITH inserted_table AS ( ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING * ) SELECT * FROM inserted_table ORDER BY 1; -DEBUG: generating subplan 18_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2 +DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2 DEBUG: Set operations are not allowed in distributed INSERT ... 
SELECT queries -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: push down of limit count: 5 -DEBUG: generating subplan 20_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 20_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5 -DEBUG: generating subplan 20_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo - col_1 | col_2 --------+------- +DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo + col_1 | col_2 +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 @@ -179,8 +179,8 @@ WITH cte AS( INSERT INTO target_table SELECT * FROM cte ON CONFLICT DO NOTHING; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 25_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 -DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery -- Get the select part from cte and update on conflict WITH cte AS( SELECT col_1, col_2 FROM source_table_1 @@ -188,11 +188,11 @@ WITH cte AS( INSERT INTO target_table SELECT * FROM cte ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: generating subplan 28_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 -DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -214,13 +214,13 @@ WITH cte AS( INSERT INTO target_table ((SELECT * FROM cte) UNION (SELECT * FROM cte_2)) ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 32_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 -DEBUG: generating subplan 32_2 for CTE cte_2: SELECT col_1, col_2 FROM on_conflict.source_table_2 -DEBUG: generating subplan 32_3 for subquery SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte UNION SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2 -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1 +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM on_conflict.source_table_2 +DEBUG: generating subplan XXX_3 for subquery SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte UNION SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -241,15 +241,15 @@ WITH inserted_table AS ( ) INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING * ) SELECT * FROM inserted_table ORDER BY 1; -DEBUG: generating subplan 37_1 for CTE inserted_table: WITH cte AS (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2 +DEBUG: generating subplan XXX_1 for CTE inserted_table: WITH cte AS (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2 DEBUG: distributed INSERT ... 
SELECT can only select from distributed tables -DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: generating subplan 39_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 -DEBUG: generating subplan 39_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery - col_1 | col_2 --------+------- +DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery + col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -264,23 +264,23 @@ WITH cte AS ( INSERT INTO target_table (SELECT * FROM basic) ON CONFLICT DO NOTHING RETURNING * ) UPDATE target_table SET col_2 = 4 WHERE col_1 IN (SELECT col_1 FROM cte); -DEBUG: generating subplan 42_1 for CTE cte: WITH basic AS (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2 +DEBUG: generating subplan XXX_1 for CTE cte: WITH basic AS (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2 DEBUG: distributed INSERT ... 
SELECT can only select from distributed tables -DEBUG: Plan 42 query after replacing subqueries and CTEs: UPDATE on_conflict.target_table SET col_2 = 4 WHERE (col_1 OPERATOR(pg_catalog.=) ANY (SELECT cte.col_1 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE on_conflict.target_table SET col_2 = 4 WHERE (col_1 OPERATOR(pg_catalog.=) ANY (SELECT cte.col_1 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte)) DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: generating subplan 44_1 for CTE basic: SELECT col_1, col_2 FROM on_conflict.source_table_1 -DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT basic.col_1, basic.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) basic) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE basic: SELECT col_1, col_2 FROM on_conflict.source_table_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT basic.col_1, basic.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) basic) citus_insert_select_subquery RESET client_min_messages; -- Following query is not supported since error checks of the subquery pushdown planner -- and insert select planner have not been unified. It should work after unifying them. WITH cte AS ( - SELECT + SELECT col_1, col_2 - FROM + FROM source_table_1 -) -INSERT INTO target_table -SELECT +) +INSERT INTO target_table +SELECT source_table_1.col_1, source_table_1.col_2 FROM cte, source_table_1 @@ -290,9 +290,9 @@ DETAIL: Select query cannot be pushed down to the worker. -- Tests with foreign key to reference table CREATE TABLE test_ref_table (key int PRIMARY KEY); SELECT create_reference_table('test_ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_ref_table VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); @@ -300,26 +300,26 @@ ALTER TABLE target_table ADD CONSTRAINT fkey FOREIGN KEY (col_1) REFERENCES test BEGIN; TRUNCATE test_ref_table CASCADE; NOTICE: truncate cascades to table "target_table" - INSERT INTO - target_table - SELECT + INSERT INTO + target_table + SELECT col_2, col_1 FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; -ERROR: insert or update on table "target_table_1900000" violates foreign key constraint "fkey_1900000" -DETAIL: Key (col_1)=(1) is not present in table "test_ref_table_1900012". -CONTEXT: while executing command on localhost:57637 +ERROR: insert or update on table "target_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx" +DETAIL: Key (col_1)=(X) is not present in table "test_ref_table_xxxxxxx". 
+CONTEXT: while executing command on localhost:xxxxx ROLLBACK; BEGIN; DELETE FROM test_ref_table WHERE key > 10; - INSERT INTO + INSERT INTO target_table - SELECT - col_2, - col_1 + SELECT + col_2, + col_1 FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 1 RETURNING *; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -333,59 +333,59 @@ ROLLBACK; BEGIN; TRUNCATE test_ref_table CASCADE; NOTICE: truncate cascades to table "target_table" - INSERT INTO + INSERT INTO source_table_1 - SELECT + SELECT col_2, - col_1 + col_1 FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; - col_1 | col_2 | col_3 --------+-------+------- + col_1 | col_2 | col_3 +--------------------------------------------------------------------- (0 rows) ROLLBACK; BEGIN; DELETE FROM test_ref_table; - INSERT INTO + INSERT INTO source_table_1 - SELECT + SELECT col_2, col_1 FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; - col_1 | col_2 | col_3 --------+-------+------- + col_1 | col_2 | col_3 +--------------------------------------------------------------------- (0 rows) ROLLBACK; -- INSERT .. SELECT with different column types CREATE TABLE source_table_3(col_1 numeric, col_2 numeric, col_3 numeric); SELECT create_distributed_table('source_table_3','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_3 VALUES(1,11,1),(2,22,2),(3,33,3),(4,44,4),(5,55,5); CREATE TABLE source_table_4(id int, arr_val text[]); SELECT create_distributed_table('source_table_4','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_4 VALUES(1, '{"abc","cde","efg"}'), (2, '{"xyz","tvu"}'); CREATE TABLE target_table_2(id int primary key, arr_val char(10)[]); SELECT create_distributed_table('target_table_2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO target_table_2 VALUES(1, '{"abc","def","gyx"}'); SET client_min_messages to debug1; INSERT INTO target_table -SELECT - col_1, col_2 +SELECT + col_1, col_2 FROM source_table_3 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2; @@ -393,8 +393,8 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator SELECT * FROM target_table ORDER BY 1; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -408,14 +408,14 @@ SELECT * FROM target_table ORDER BY 1; (10 rows) INSERT INTO target_table_2 -SELECT - * +SELECT + * FROM source_table_4 ON CONFLICT DO NOTHING; SELECT * FROM target_table_2 ORDER BY 1; - id | arr_val -----+------------------------------------------ + id | arr_val +--------------------------------------------------------------------- 1 | {"abc ","def ","gyx "} 2 | {"xyz ","tvu "} (2 rows) @@ -426,45 +426,45 @@ SET citus.shard_replication_factor to 2; DROP TABLE target_table, source_table_1, source_table_2; CREATE TABLE target_table(col_1 int primary key, col_2 int); SELECT create_distributed_table('target_table','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6); CREATE TABLE source_table_1(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_1','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5); CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_2','col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10); SET client_min_messages to debug1; -- Generate series directly on the coordinator and on conflict do nothing -INSERT INTO target_table (col_1, col_2) -SELECT - s, s -FROM - generate_series(1,10) s +INSERT INTO target_table (col_1, col_2) +SELECT + s, s +FROM + generate_series(1,10) s ON CONFLICT DO NOTHING; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator -- Test with multiple subqueries INSERT INTO target_table -SELECT +SELECT col_1, col_2 FROM ( - (SELECT - col_1, col_2, col_3 + (SELECT + col_1, col_2, col_3 FROM source_table_1 LIMIT 5) @@ -479,14 +479,14 @@ ON CONFLICT(col_1) DO UPDATE SET col_2 = 0; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: push down of limit count: 5 -DEBUG: generating subplan 71_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 71_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5 -DEBUG: generating subplan 71_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) -DEBUG: Plan 71 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo +DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo SELECT * FROM target_table ORDER BY 1; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 @@ -507,12 +507,12 @@ WITH cte AS( INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 77_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 -DEBUG: generating subplan 77_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte -DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; - col_1 | col_2 --------+------- + col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 diff --git a/src/test/regress/expected/multi_insert_select_non_pushable_queries.out b/src/test/regress/expected/multi_insert_select_non_pushable_queries.out index cff82f757..07280ec91 100644 --- a/src/test/regress/expected/multi_insert_select_non_pushable_queries.out +++ b/src/test/regress/expected/multi_insert_select_non_pushable_queries.out @@ -1,19 +1,19 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Insert into local table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE test_table_1(id int); INSERT INTO test_table_1 SELECT user_id FROM users_table; ERROR: cannot INSERT rows from a distributed query into a local table HINT: Consider using CREATE TEMPORARY TABLE tmp AS SELECT ... and inserting from the temporary table. 
DROP TABLE test_table_1; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the JOIN is not an equi join INSERT INTO agg_results_third (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -32,11 +32,11 @@ FROM ( ) q; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the JOIN is not an equi join left part of the UNION -- is not equi join INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) @@ -160,18 +160,18 @@ FROM ( ) t GROUP BY user_id, hasdone_event; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: generating subplan 10_1 for subquery SELECT u.user_id, 'step=>1'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[100, 101, 102]))) -DEBUG: generating subplan 10_2 for subquery SELECT u.user_id, 'step=>2'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[103, 104, 105]))) -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) UNION SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) -DEBUG: generating subplan 9_1 for subquery SELECT u.user_id, 'step=>1'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[100, 101, 102]))) UNION SELECT u.user_id, 'step=>2'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type 
OPERATOR(pg_catalog.=) ANY (ARRAY[103, 104, 105]))) +DEBUG: generating subplan XXX_1 for subquery SELECT u.user_id, 'step=>1'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[100, 101, 102]))) +DEBUG: generating subplan XXX_2 for subquery SELECT u.user_id, 'step=>2'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[103, 104, 105]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) UNION SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) +DEBUG: generating subplan XXX_1 for subquery SELECT u.user_id, 'step=>1'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[100, 101, 102]))) UNION SELECT u.user_id, 'step=>2'::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.>=) 10) AND (u.user_id OPERATOR(pg_catalog.<=) 25) AND (e.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[103, 104, 105]))) ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join RESET client_min_messages; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the right of the UNION query is not joined on -- the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) @@ -311,20 +311,20 @@ ORDER BY count_pay; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 19_1 for subquery SELECT users_table.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 10) AND (events_table.event_type OPERATOR(pg_catalog.<) 12)) -DEBUG: generating subplan 19_2 for subquery SELECT users_table.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 12) AND (events_table.event_type OPERATOR(pg_catalog.<) 14)) -DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) UNION SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) -DEBUG: generating subplan 18_1 for subquery SELECT users_table.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 10) AND (events_table.event_type OPERATOR(pg_catalog.<) 12)) UNION SELECT users_table.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 12) AND (events_table.event_type OPERATOR(pg_catalog.<) 14)) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 10) AND (events_table.event_type OPERATOR(pg_catalog.<) 12)) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 12) AND (events_table.event_type OPERATOR(pg_catalog.<) 14)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id 
integer, event text, "time" timestamp without time zone) UNION SELECT intermediate_result.user_id, intermediate_result.event, intermediate_result."time" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event text, "time" timestamp without time zone) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 10) AND (events_table.event_type OPERATOR(pg_catalog.<) 12)) UNION SELECT users_table.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.user_id OPERATOR(pg_catalog.>=) 10) AND (users_table.user_id OPERATOR(pg_catalog.<=) 70) AND (events_table.event_type OPERATOR(pg_catalog.>) 12) AND (events_table.event_type OPERATOR(pg_catalog.<) 14)) ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join RESET client_min_messages; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since lateral join is not an equi join INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT @@ -412,11 +412,11 @@ FROM ( ORDER BY user_lastseen DESC; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since partition key is NOT IN INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id @@ -444,11 +444,11 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_ AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since join is not an euqi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -465,11 +465,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND event_type = users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -486,11 +486,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND event_type=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -500,11 +500,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the first join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -513,11 +513,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the second join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, @@ -572,11 +572,11 @@ INSERT INTO agg_results_third(user_id, value_2_agg) HAVING Count(*) > 2); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id) Select user_id @@ -617,11 +617,11 @@ And event_type in And value_2 > 25); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table @@ -643,11 +643,11 @@ WHERE event_type IN (SELECT user_id from events_table WHERE event_type > 500 and GROUP BY user_id, event_type; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT diff --git a/src/test/regress/expected/multi_insert_select_window.out b/src/test/regress/expected/multi_insert_select_window.out index 6df46b924..aa3dea909 100644 --- a/src/test/regress/expected/multi_insert_select_window.out +++ b/src/test/regress/expected/multi_insert_select_window.out @@ -14,8 +14,8 @@ FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -33,8 +33,8 @@ FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -52,8 +52,8 @@ FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -74,8 +74,8 @@ FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 12 | 6 | 3.5000000000000000 (1 row) @@ -95,8 +95,8 @@ GROUP BY lag_event_type; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), 
avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 8 | 2 | 1.1250000000000000 (1 row) @@ -116,8 +116,8 @@ SELECT * FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 30 | 6 | 3.4000000000000000 (1 row) @@ -138,8 +138,8 @@ SELECT * FROM ) as foo; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 20 | 6 | 3.3500000000000000 (1 row) @@ -175,8 +175,8 @@ JOIN sub_1.user_id; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -201,8 +201,8 @@ GROUP BY my_rank; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 4.0000000000000000 (1 row) @@ -227,8 +227,8 @@ GROUP BY my_rank; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 2 | 2 | 3.5000000000000000 (1 row) @@ -252,8 +252,8 @@ GROUP BY my_rank; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 1 | 1 | 4.0000000000000000 (1 row) @@ -274,8 +274,8 @@ LIMIT 10; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -292,8 +292,8 @@ SELECT user_id, max(sum) FROM ( GROUP BY user_id; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -318,8 +318,8 @@ GROUP BY user_id; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT 
user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 4 | 4 | 2.5000000000000000 (1 row) @@ -338,8 +338,8 @@ SELECT * FROM ( ) a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -361,8 +361,8 @@ GROUP BY user_id, rank; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 32 | 6 | 3.5937500000000000 (1 row) @@ -392,8 +392,8 @@ WHERE ) a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -410,8 +410,8 @@ FROM ) a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -428,8 +428,8 @@ SELECT * FROM ( ) a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 26 | 6 | 3.7692307692307692 (1 row) @@ -450,8 +450,8 @@ LIMIT 10; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 10 | 5 | 3.8000000000000000 (1 row) @@ -471,8 +471,8 @@ FROM view_with_window_func; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 26 | 6 | 3.7692307692307692 (1 row) @@ -487,8 +487,8 @@ LIMIT -- get some statistics from the aggregated results to ensure the results are correct -- since there is a limit but not order, we cannot run avg(user_id) SELECT count(*) FROM agg_results_window; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -517,8 +517,8 @@ LIMIT -- get some statistics from the aggregated results to ensure the results are correct -- since there is a limit but not order, we cannot test avg or distinct count SELECT count(*) FROM agg_results_window; - count -------- + 
count +--------------------------------------------------------------------- 5 (1 row) @@ -544,8 +544,8 @@ GROUP BY user_id; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -589,8 +589,8 @@ LIMIT -- get some statistics from the aggregated results to ensure the results are correct -- since there is a limit but not order, we cannot test avg or distinct count SELECT count(*) FROM agg_results_window; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -631,8 +631,8 @@ FROM ( ) AS ftop; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out index 8765c9735..86aec27af 100644 --- a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -30,9 +30,9 @@ CREATE TABLE lineitem_hash ( l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT create_distributed_table('lineitem_hash', 'l_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate); @@ -48,9 +48,9 @@ CREATE TABLE orders_hash ( o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_hash', 'o_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE customer_hash ( @@ -63,9 +63,9 @@ CREATE TABLE customer_hash ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_distributed_table('customer_hash', 'c_custkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG2; @@ -76,8 +76,8 @@ DEBUG: Router planner does not support append-partitioned tables. 
LOG: join order: [ "lineitem" ][ local partition join "lineitem" ] DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] - QUERY PLAN --------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (2 rows) @@ -90,8 +90,8 @@ EXPLAIN SELECT count(*) FROM lineitem, orders WHERE (l_orderkey = o_orderkey AND l_quantity > 5) OR (l_orderkey = o_orderkey AND l_quantity < 10); LOG: join order: [ "lineitem" ][ local partition join "orders" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -103,8 +103,8 @@ ERROR: complex joins are only supported when all distributed tables are joined EXPLAIN SELECT count(*) FROM orders, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -114,8 +114,8 @@ LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ] EXPLAIN SELECT count(*) FROM orders_hash, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -125,8 +125,8 @@ LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] EXPLAIN SELECT count(*) FROM customer_hash, nation WHERE c_nationkey = n_nationkey; LOG: join order: [ "customer_hash" ][ reference join "nation" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -137,8 +137,8 @@ LOG: join order: [ "customer_hash" ][ reference join "nation" ] EXPLAIN SELECT count(*) FROM orders, lineitem, customer_append WHERE o_custkey = l_partkey AND o_custkey = c_nationkey; LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer_append" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -149,8 +149,8 @@ LOG: join order: [ 
"orders" ][ dual partition join "lineitem" ][ dual partition EXPLAIN SELECT count(*) FROM orders, customer_hash WHERE c_custkey = o_custkey; LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -161,8 +161,8 @@ LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] EXPLAIN SELECT count(*) FROM orders_hash, customer_append WHERE c_custkey = o_custkey; LOG: join order: [ "orders_hash" ][ single range partition join "customer_append" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out index 51bca0641..24beca674 100644 --- a/src/test/regress/expected/multi_join_order_tpch_repartition.out +++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out @@ -21,8 +21,8 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; LOG: join order: [ "lineitem" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -52,8 +52,8 @@ ORDER BY revenue DESC, o_orderdate; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ] - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -95,8 +95,8 @@ GROUP BY ORDER BY revenue DESC; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ][ reference join "nation" ] - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -136,8 +136,8 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ single range partition join "part_append" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ 
-155,8 +155,8 @@ WHERE GROUP BY l_partkey; LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single range partition join "part_append" ][ single range partition join "customer_append" ] - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.l_partkey -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) diff --git a/src/test/regress/expected/multi_join_order_tpch_small.out b/src/test/regress/expected/multi_join_order_tpch_small.out index 032d46a7d..b7ccdaafe 100644 --- a/src/test/regress/expected/multi_join_order_tpch_small.out +++ b/src/test/regress/expected/multi_join_order_tpch_small.out @@ -16,8 +16,8 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; LOG: join order: [ "lineitem" ] - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -47,8 +47,8 @@ ORDER BY revenue DESC, o_orderdate; LOG: join order: [ "orders" ][ reference join "customer" ][ local partition join "lineitem" ] - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -90,8 +90,8 @@ GROUP BY ORDER BY revenue DESC; LOG: join order: [ "orders" ][ reference join "customer" ][ reference join "nation" ][ local partition join "lineitem" ] - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -131,8 +131,8 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ reference join "part" ] - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index 7a0e933d9..59160fe10 100644 --- a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -11,8 +11,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders DEBUG: Router planner does not support append-partitioned tables. 
DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] - sum | avg --------+-------------------- + sum | avg +--------------------------------------------------------------------- 36089 | 3.0074166666666667 (1 row) @@ -20,8 +20,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030; DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [8997,14947] and [1,5986] - sum | avg --------+-------------------- + sum | avg +--------------------------------------------------------------------- 17999 | 3.0189533713518953 (1 row) @@ -30,9 +30,9 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986] SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 20000; DEBUG: Router planner does not support append-partitioned tables. - sum | avg ------+----- - | + sum | avg +--------------------------------------------------------------------- + | (1 row) -- Partition pruning left three shards for the lineitem and one shard for the @@ -41,27 +41,27 @@ DEBUG: Router planner does not support append-partitioned tables. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000; DEBUG: Router planner does not support append-partitioned tables. - sum | avg ------+----- - | + sum | avg +--------------------------------------------------------------------- + | (1 row) -- Make sure that we can handle filters without a column SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND false; DEBUG: Router planner does not support append-partitioned tables. - sum | avg ------+----- - | + sum | avg +--------------------------------------------------------------------- + | (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem INNER JOIN orders ON (l_orderkey = o_orderkey) WHERE false; DEBUG: Router planner does not support append-partitioned tables. - sum | avg ------+----- - | + sum | avg +--------------------------------------------------------------------- + | (1 row) -- These tests check that we can do join pruning for tables partitioned over @@ -74,8 +74,8 @@ EXPLAIN SELECT count(*) DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}] - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -87,8 +87,8 @@ EXPLAIN SELECT count(*) DEBUG: Router planner does not support append-partitioned tables. 
DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)] DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)] - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -101,8 +101,8 @@ EXPLAIN SELECT count(*) DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6] DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U2AMO4ZGX,AZZXSP27F21T6] - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_json_agg.out b/src/test/regress/expected/multi_json_agg.out index 9bdd79fd4..16503104c 100644 --- a/src/test/regress/expected/multi_json_agg.out +++ b/src/test/regress/expected/multi_json_agg.out @@ -12,8 +12,8 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement json_agg() SELECT json_cat_agg(i) FROM (VALUES ('[1,{"a":2}]'::json), ('[null]'::json), (NULL), ('["3",5,4]'::json)) AS t(i); - json_cat_agg -------------------------------- + json_cat_agg +--------------------------------------------------------------------- [1, {"a":2}, null, "3", 5, 4] (1 row) @@ -27,8 +27,8 @@ ERROR: json_agg with order by is unsupported -- Check json_agg() for different data types and LIMIT clauses SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort --------------------------------------------------------- + array_sort +--------------------------------------------------------------------- [2132, 15635, 24027, 63700, 67310, 155190] [106170] [4297, 19036, 29380, 62143, 128449, 183095] @@ -43,8 +43,8 @@ SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ------------------------------------------------------------------------ + array_sort +--------------------------------------------------------------------- [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16] [44694.46] [2618.76, 28733.64, 32986.52, 39890.88, 46796.47, 54058.05] @@ -59,8 +59,8 @@ SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ----------------------------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"] ["1997-01-28"] ["1993-10-29", "1993-11-09", "1993-12-04", "1993-12-14", "1994-01-16", "1994-02-02"] @@ -75,8 +75,8 @@ SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort 
----------------------------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "] ["RAIL "] ["AIR ", "FOB ", "RAIL ", "RAIL ", "SHIP ", "TRUCK "] @@ -91,8 +91,8 @@ SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute json_agg() within other functions SELECT json_array_length(json_agg(l_orderkey)) FROM lineitem; - json_array_length -------------------- + json_array_length +--------------------------------------------------------------------- 12000 (1 row) @@ -103,8 +103,8 @@ SELECT json_array_length(json_agg(l_orderkey)) FROM lineitem; SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(json_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------------------------- + l_quantity | count | avg | array_sort +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476] 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476] 3.00 | 14 | 4714.0392857142857143 | [5509, 5543, 5605, 5606, 5827, 9124, 9157, 9184, 9223, 9254, 9349, 9414, 9475, 9477] @@ -114,8 +114,8 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(json_agg(l_orderke SELECT l_quantity, array_sort(json_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | my_month -------------+------------------------------------------------------------------ + l_quantity | my_month +--------------------------------------------------------------------- 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11] 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12] 3.00 | [3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11] @@ -125,8 +125,8 @@ SELECT l_quantity, array_sort(json_agg(extract (month FROM o_orderdate))) AS my_ SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | array_sort -------------+--------------------------------------------------- + l_quantity | array_sort +--------------------------------------------------------------------- 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445] 2.00 | [11847, 18061, 18247, 18953] 3.00 | [18249, 18315, 18699, 18951, 18955] @@ -136,32 +136,32 @@ SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE -- Check that we can execute json_agg() with an expression containing NULL values SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; - json_agg -------------------------------------------------------------------------------------------------- + json_agg 
+--------------------------------------------------------------------- [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00] (1 row) -- Check that we can execute json_agg() with an expression containing different types SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end) FROM lineitem WHERE l_orderkey < 5; - json_agg ----------------------------------------------------------------------------------------------- + json_agg +--------------------------------------------------------------------- ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00] (1 row) -- Check that we can execute json_agg() with an expression containing json arrays SELECT json_agg(json_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; - json_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------- + json_agg +--------------------------------------------------------------------- [[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) -- Check that we can execute json_agg() with an expression containing arrays SELECT json_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - json_agg --------------- + json_agg +--------------------------------------------------------------------- [[17.00,1], + [36.00,1], + [8.00,1], + @@ -173,8 +173,8 @@ SELECT json_agg(ARRAY[l_quantity, l_orderkey]) -- Check that we return NULL in case there are no input rows to json_agg() SELECT json_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - json_agg ----------- - + json_agg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_json_object_agg.out b/src/test/regress/expected/multi_json_object_agg.out index 79b79dee5..adb4d7ee1 100644 --- a/src/test/regress/expected/multi_json_object_agg.out +++ b/src/test/regress/expected/multi_json_object_agg.out @@ -17,8 +17,8 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement json_object_agg() SELECT json_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::json), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::json)) AS t(i); - json_cat_agg ------------------------------------------------------------ + json_cat_agg +--------------------------------------------------------------------- { "c" : [], "b" : 2, "d" : null, "a" : {"b":3}, "b" : 2 } (1 row) @@ -33,8 +33,8 @@ ERROR: json_object_agg with order by is unsupported SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort ------------------------------------------------------------------------------------------------------------ + keys_sort +--------------------------------------------------------------------- { "11" : 155190, "12" : 67310, "13" : 63700, "14" : 2132, "15" : 24027, "16" : 15635 } { "21" : 106170 } { "31" : 4297, "32" : 19036, "33" : 128449, "34" : 29380, "35" : 183095, "36" : 62143 } @@ -50,8 +50,8 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_partk SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort 
--------------------------------------------------------------------------------------------------------------------------- + keys_sort +--------------------------------------------------------------------- { "11" : 21168.23, "12" : 45983.16, "13" : 13309.60, "14" : 28955.64, "15" : 22824.48, "16" : 49620.16 } { "21" : 44694.46 } { "31" : 54058.05, "32" : 46796.47, "33" : 39890.88, "34" : 2618.76, "35" : 32986.52, "36" : 28733.64 } @@ -67,8 +67,8 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_exten SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort -------------------------------------------------------------------------------------------------------------------------------------------------------- + keys_sort +--------------------------------------------------------------------- { "11" : "TRUCK ", "12" : "MAIL ", "13" : "REG AIR ", "14" : "AIR ", "15" : "FOB ", "16" : "MAIL " } { "21" : "RAIL " } { "31" : "AIR ", "32" : "RAIL ", "33" : "SHIP ", "34" : "TRUCK ", "35" : "FOB ", "36" : "RAIL " } @@ -84,8 +84,8 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipm SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort -------------------------------------------------------------------------------------------------------------------------------------------------------- + keys_sort +--------------------------------------------------------------------- { "11" : "1996-03-13", "12" : "1996-04-12", "13" : "1996-01-29", "14" : "1996-04-21", "15" : "1996-03-30", "16" : "1996-01-30" } { "21" : "1997-01-28" } { "31" : "1994-02-02", "32" : "1993-11-09", "33" : "1994-01-16", "34" : "1993-12-04", "35" : "1993-12-14", "36" : "1993-10-29" } @@ -100,8 +100,8 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipd -- Check that we can execute json_object_agg() within other functions SELECT count_keys(json_object_agg(l_shipdate, l_orderkey)) FROM lineitem; - count_keys ------------- + count_keys +--------------------------------------------------------------------- 12000 (1 row) @@ -114,8 +114,8 @@ SELECT l_quantity, count(*), avg(l_extendedprice), FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | keys_sort -------------+-------+-----------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + l_quantity | count | avg | keys_sort +--------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | { "50635" : "1997-09-03", "51551" : "1994-07-03", "51872" : "1997-08-08", "52221" : "1994-08-19", "52832" : "1994-06-20", "52855" : "1994-03-14", "52856" : "1994-02-08", "52861" : "1997-11-25" } 2.00 | 8 | 2990.9825000000000000 | { "50292" : "1992-11-25", "50633" : "1997-06-17", "50904" : "1997-04-07", "50952" : "1992-07-09", "51216" : "1992-08-10", "52191" : "1997-06-26", "52501" : "1995-08-09", "52551" : "1996-09-27" } 3.00 | 2 | 4744.8000000000000000 | { "50275" : "1997-09-30", "52863" : "1997-12-04" } @@ -126,8 +126,8 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t extract (month FROM 
o_orderdate))) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | keys_sort -------------+------------------------------------------------------------------------------------------------------------- + l_quantity | keys_sort +--------------------------------------------------------------------- 1.00 | { "50635" : 5, "51551" : 6, "51872" : 7, "52221" : 5, "52832" : 6, "52855" : 1, "52856" : 1, "52861" : 9 } 2.00 | { "50292" : 11, "50633" : 5, "50904" : 3, "50952" : 4, "51216" : 5, "52191" : 2, "52501" : 7, "52551" : 7 } 3.00 | { "50275" : 8, "52863" : 9 } @@ -138,8 +138,8 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | keys_sort -------------+-------------------------------------------------------------------------------------------------------------------------------------------- + l_quantity | keys_sort +--------------------------------------------------------------------- 1.00 | { "51551" : 10311, "52221" : 10445, "52855" : 10571, "56345" : 11269, "56986" : 11397, "58561" : 11713, "58573" : 11715, "59863" : 11973 } 2.00 | { "52191" : 10439, "53513" : 10703, "59233" : 11847 } 3.00 | { "54401" : 10881 } @@ -150,8 +150,8 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end)) FROM lineitem WHERE l_orderkey < 5; - keys_sort ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + keys_sort +--------------------------------------------------------------------- { "11" : null, "12" : 36.00, "13" : null, "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : null, "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) @@ -159,31 +159,31 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end)) FROM lineitem WHERE l_orderkey < 5; - keys_sort --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + keys_sort +--------------------------------------------------------------------- { "11" : "f", "12" : 36.00, "13" : "f", "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : "f", "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) -- Check that we can execute json_object_agg() with an expression containing json arrays SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_build_array(l_quantity, l_shipdate))) FROM lineitem WHERE l_orderkey < 3; - keys_sort ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + keys_sort 
+--------------------------------------------------------------------- { "11" : [17.00, "1996-03-13"], "12" : [36.00, "1996-04-12"], "13" : [8.00, "1996-01-29"], "14" : [28.00, "1996-04-21"], "15" : [24.00, "1996-03-30"], "16" : [32.00, "1996-01-30"], "21" : [38.00, "1997-01-28"] } (1 row) -- Check that we can execute json_object_agg() with an expression containing arrays SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey])) FROM lineitem WHERE l_orderkey < 3; - keys_sort ---------------------------------------------------------------------------------------------------------------------------------- + keys_sort +--------------------------------------------------------------------- { "11" : [17.00,1], "12" : [36.00,1], "13" : [8.00,1], "14" : [28.00,1], "15" : [24.00,1], "16" : [32.00,1], "21" : [38.00,2] } (1 row) -- Check that we return NULL in case there are no input rows to json_object_agg() SELECT json_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; - json_object_agg ------------------ - + json_object_agg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_jsonb_agg.out b/src/test/regress/expected/multi_jsonb_agg.out index 0fee5e6e7..79d787d86 100644 --- a/src/test/regress/expected/multi_jsonb_agg.out +++ b/src/test/regress/expected/multi_jsonb_agg.out @@ -12,8 +12,8 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement jsonb_agg() SELECT jsonb_cat_agg(i) FROM (VALUES ('[1,{"a":2}]'::jsonb), ('[null]'::jsonb), (NULL), ('["3",5,4]'::jsonb)) AS t(i); - jsonb_cat_agg --------------------------------- + jsonb_cat_agg +--------------------------------------------------------------------- [1, {"a": 2}, null, "3", 5, 4] (1 row) @@ -27,8 +27,8 @@ ERROR: jsonb_agg with order by is unsupported -- Check jsonb_agg() for different data types and LIMIT clauses SELECT array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort --------------------------------------------------------- + array_sort +--------------------------------------------------------------------- [2132, 15635, 24027, 63700, 67310, 155190] [106170] [4297, 19036, 29380, 62143, 128449, 183095] @@ -43,8 +43,8 @@ SELECT array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ------------------------------------------------------------------------ + array_sort +--------------------------------------------------------------------- [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16] [44694.46] [2618.76, 28733.64, 32986.52, 39890.88, 46796.47, 54058.05] @@ -59,8 +59,8 @@ SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ----------------------------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"] ["1997-01-28"] ["1993-10-29", "1993-11-09", "1993-12-04", "1993-12-14", "1994-01-16", "1994-02-02"] @@ -75,8 +75,8 @@ SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem GROUP BY 
l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort ----------------------------------------------------------------------------------------------------- + array_sort +--------------------------------------------------------------------- ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "] ["RAIL "] ["AIR ", "FOB ", "RAIL ", "RAIL ", "SHIP ", "TRUCK "] @@ -91,8 +91,8 @@ SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute jsonb_agg() within other functions SELECT jsonb_array_length(jsonb_agg(l_orderkey)) FROM lineitem; - jsonb_array_length --------------------- + jsonb_array_length +--------------------------------------------------------------------- 12000 (1 row) @@ -103,8 +103,8 @@ SELECT jsonb_array_length(jsonb_agg(l_orderkey)) FROM lineitem; SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(jsonb_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------------------------- + l_quantity | count | avg | array_sort +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476] 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476] 3.00 | 14 | 4714.0392857142857143 | [5509, 5543, 5605, 5606, 5827, 9124, 9157, 9184, 9223, 9254, 9349, 9414, 9475, 9477] @@ -114,8 +114,8 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(jsonb_agg(l_orderk SELECT l_quantity, array_sort(jsonb_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | my_month -------------+------------------------------------------------------------------ + l_quantity | my_month +--------------------------------------------------------------------- 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11] 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12] 3.00 | [3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11] @@ -125,8 +125,8 @@ SELECT l_quantity, array_sort(jsonb_agg(extract (month FROM o_orderdate))) AS my SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | array_sort -------------+--------------------------------------------------- + l_quantity | array_sort +--------------------------------------------------------------------- 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445] 2.00 | [11847, 18061, 18247, 18953] 3.00 | [18249, 18315, 18699, 18951, 18955] @@ -136,39 +136,39 @@ SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE -- Check that we can execute jsonb_agg() with an expression containing NULL values SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; - jsonb_agg 
-------------------------------------------------------------------------------------------------- + jsonb_agg +--------------------------------------------------------------------- [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00] (1 row) -- Check that we can execute jsonb_agg() with an expression containing different types SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; - jsonb_agg ----------------------------------------------------------------------------------------------- + jsonb_agg +--------------------------------------------------------------------- ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00] (1 row) -- Check that we can execute jsonb_agg() with an expression containing jsonb arrays SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; - jsonb_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_agg +--------------------------------------------------------------------- [[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) -- Check that we can execute jsonb_agg() with an expression containing arrays SELECT jsonb_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - jsonb_agg -------------------------------------------------------------------------------------- + jsonb_agg +--------------------------------------------------------------------- [[17.00, 1], [36.00, 1], [8.00, 1], [28.00, 1], [24.00, 1], [32.00, 1], [38.00, 2]] (1 row) -- Check that we return NULL in case there are no input rows to jsonb_agg() SELECT jsonb_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - jsonb_agg ------------ - + jsonb_agg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_jsonb_object_agg.out b/src/test/regress/expected/multi_jsonb_object_agg.out index 215920fe8..0a85c7dbd 100644 --- a/src/test/regress/expected/multi_jsonb_object_agg.out +++ b/src/test/regress/expected/multi_jsonb_object_agg.out @@ -10,8 +10,8 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement jsonb_object_agg() SELECT jsonb_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::jsonb), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::jsonb)) AS t(i); - jsonb_cat_agg ---------------------------------------------- + jsonb_cat_agg +--------------------------------------------------------------------- {"a": {"b": 3}, "b": 2, "c": [], "d": null} (1 row) @@ -26,8 +26,8 @@ ERROR: jsonb_object_agg with order by is unsupported SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_partkey) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg --------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": 155190, "12": 67310, "13": 63700, "14": 2132, "15": 24027, "16": 15635} {"21": 106170} {"31": 4297, "32": 19036, "33": 128449, "34": 29380, "35": 183095, "36": 62143} @@ -43,8 +43,8 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_partkey) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, 
l_extendedprice) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg ------------------------------------------------------------------------------------------------------------------ + jsonb_object_agg +--------------------------------------------------------------------- {"11": 21168.23, "12": 45983.16, "13": 13309.60, "14": 28955.64, "15": 22824.48, "16": 49620.16} {"21": 44694.46} {"31": 54058.05, "32": 46796.47, "33": 39890.88, "34": 2618.76, "35": 32986.52, "36": 28733.64} @@ -60,8 +60,8 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": "TRUCK ", "12": "MAIL ", "13": "REG AIR ", "14": "AIR ", "15": "FOB ", "16": "MAIL "} {"21": "RAIL "} {"31": "AIR ", "32": "RAIL ", "33": "SHIP ", "34": "TRUCK ", "35": "FOB ", "36": "RAIL "} @@ -77,8 +77,8 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": "1996-03-13", "12": "1996-04-12", "13": "1996-01-29", "14": "1996-04-21", "15": "1996-03-30", "16": "1996-01-30"} {"21": "1997-01-28"} {"31": "1994-02-02", "32": "1993-11-09", "33": "1994-01-16", "34": "1993-12-04", "35": "1993-12-14", "36": "1993-10-29"} @@ -93,8 +93,8 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) -- Check that we can execute jsonb_object_agg() within other functions SELECT count_keys(jsonb_object_agg(l_shipdate, l_orderkey)) FROM lineitem; - count_keys ------------- + count_keys +--------------------------------------------------------------------- 2470 (1 row) @@ -107,8 +107,8 @@ SELECT l_quantity, count(*), avg(l_extendedprice), FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | jsonb_object_agg -------------+-------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + l_quantity | count | avg | jsonb_object_agg +--------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | {"50635": "1997-09-03", "51551": "1994-07-03", "51872": "1997-08-08", "52221": "1994-08-19", "52832": "1994-06-20", "52855": "1994-03-14", "52856": "1994-02-08", "52861": "1997-11-25"} 2.00 | 8 | 2990.9825000000000000 | {"50292": "1992-11-25", "50633": "1997-06-17", "50904": "1997-04-07", "50952": "1992-07-09", "51216": "1992-08-10", "52191": "1997-06-26", "52501": "1995-08-09", "52551": "1996-09-27"} 3.00 | 2 | 4744.8000000000000000 | {"50275": "1997-09-30", "52863": "1997-12-04"} @@ -119,8 +119,8 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, extract (month FROM 
o_orderdate)) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | jsonb_object_agg -------------+--------------------------------------------------------------------------------------------------- + l_quantity | jsonb_object_agg +--------------------------------------------------------------------- 1.00 | {"50635": 5, "51551": 6, "51872": 7, "52221": 5, "52832": 6, "52855": 1, "52856": 1, "52861": 9} 2.00 | {"50292": 11, "50633": 5, "50904": 3, "50952": 4, "51216": 5, "52191": 2, "52501": 7, "52551": 7} 3.00 | {"50275": 8, "52863": 9} @@ -131,8 +131,8 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_or FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | jsonb_object_agg -------------+---------------------------------------------------------------------------------------------------------------------------------- + l_quantity | jsonb_object_agg +--------------------------------------------------------------------- 1.00 | {"51551": 10311, "52221": 10445, "52855": 10571, "56345": 11269, "56986": 11397, "58561": 11713, "58573": 11715, "59863": 11973} 2.00 | {"52191": 10439, "53513": 10703, "59233": 11847} 3.00 | {"54401": 10881} @@ -143,8 +143,8 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_or SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; - jsonb_object_agg -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": null, "12": 36.00, "13": null, "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": null, "35": 28.00, "36": 26.00, "41": 30.00} (1 row) @@ -152,31 +152,31 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; - jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": "f", "12": 36.00, "13": "f", "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": "f", "35": 28.00, "36": 26.00, "41": 30.00} (1 row) -- Check that we can execute jsonb_object_agg() with an expression containing jsonb arrays SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; - jsonb_object_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": [17.00, "1996-03-13"], "12": [36.00, "1996-04-12"], "13": [8.00, "1996-01-29"], 
"14": [28.00, "1996-04-21"], "15": [24.00, "1996-03-30"], "16": [32.00, "1996-01-30"], "21": [38.00, "1997-01-28"]} (1 row) -- Check that we can execute jsonb_object_agg() with an expression containing arrays SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - jsonb_object_agg -------------------------------------------------------------------------------------------------------------------------------- + jsonb_object_agg +--------------------------------------------------------------------- {"11": [17.00, 1], "12": [36.00, 1], "13": [8.00, 1], "14": [28.00, 1], "15": [24.00, 1], "16": [32.00, 1], "21": [38.00, 2]} (1 row) -- Check that we return NULL in case there are no input rows to jsonb_object_agg() SELECT jsonb_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; - jsonb_object_agg ------------------- - + jsonb_object_agg +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 9905d925b..6d891ddde 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -3,9 +3,9 @@ -- CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO lineitem_hash SELECT * FROM lineitem; @@ -17,8 +17,8 @@ SET client_min_messages TO DEBUG1; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -55,8 +55,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 @@ -93,8 +93,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -105,8 +105,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 10; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -122,8 +122,8 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 
GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC LIMIT 10; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 @@ -139,51 +139,51 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 -- Check that we can handle limits for simple sort clauses. We order by columns -- in the first two tests, and then by a simple expression in the last test. SELECT min(l_orderkey) FROM lineitem; - min ------ + min +--------------------------------------------------------------------- 1 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey ASC LIMIT 1; DEBUG: push down of limit count: 1 - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 1 (1 row) SELECT max(l_orderkey) FROM lineitem; - max -------- + max +--------------------------------------------------------------------- 14947 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1; DEBUG: push down of limit count: 1 - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 14947 (1 row) SELECT * FROM lineitem ORDER BY l_orderkey DESC, l_linenumber DESC LIMIT 3; DEBUG: push down of limit count: 3 - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+--------------------------------- + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment +--------------------------------------------------------------------- 14947 | 107098 | 7099 | 2 | 29.00 | 32047.61 | 0.04 | 0.06 | N | O | 11-08-1995 | 08-30-1995 | 12-03-1995 | TAKE BACK RETURN | FOB | inal sentiments t 14947 | 31184 | 3688 | 1 | 14.00 | 15612.52 | 0.09 | 0.02 | N | O | 11-05-1995 | 09-25-1995 | 11-27-1995 | TAKE BACK RETURN | RAIL | bout the even, iro 14946 | 79479 | 4494 | 2 | 37.00 | 53963.39 | 0.01 | 0.01 | N | O | 11-27-1996 | 02-01-1997 | 11-29-1996 | COLLECT COD | AIR | sleep furiously after the furio (3 rows) SELECT max(extract(epoch from l_shipdate)) FROM lineitem; - max ------------ + max +--------------------------------------------------------------------- 912124800 (1 row) SELECT * FROM lineitem ORDER BY extract(epoch from l_shipdate) DESC, l_orderkey DESC LIMIT 3; DEBUG: push down of limit count: 3 - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------- + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | 
l_receiptdate | l_shipinstruct | l_shipmode | l_comment +--------------------------------------------------------------------- 4678 | 57388 | 9894 | 1 | 35.00 | 47088.30 | 0.04 | 0.08 | N | O | 11-27-1998 | 10-02-1998 | 12-17-1998 | TAKE BACK RETURN | AIR | he accounts. fluffily bold sheaves b 12384 | 84161 | 1686 | 5 | 6.00 | 6870.96 | 0.04 | 0.00 | N | O | 11-26-1998 | 10-04-1998 | 12-08-1998 | COLLECT COD | RAIL | ep blithely. blithely ironic r 1124 | 92298 | 4808 | 3 | 35.00 | 45160.15 | 0.10 | 0.05 | N | O | 11-25-1998 | 10-08-1998 | 12-25-1998 | TAKE BACK RETURN | AIR | ut the slyly bold pinto beans; fi @@ -196,8 +196,8 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity LIMIT 1; DEBUG: push down of limit count: 1 - l_quantity | l_discount | avg -------------+------------+-------------------- + l_quantity | l_discount | avg +--------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -206,8 +206,8 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity, l_discount LIMIT 1; DEBUG: push down of limit count: 1 - l_quantity | l_discount | avg -------------+------------+-------------------- + l_quantity | l_discount | avg +--------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -218,8 +218,8 @@ SELECT l_orderkey, count(DISTINCT l_partkey) GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 14885 | 7 14884 | 7 14821 | 7 @@ -232,8 +232,8 @@ SELECT l_orderkey GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 1 2 3 @@ -246,8 +246,8 @@ SELECT max(l_orderkey) FROM lineitem_hash GROUP BY l_linestatus ORDER BY 1 DESC LIMIT 2; - max -------- + max +--------------------------------------------------------------------- 14947 14916 (2 rows) @@ -257,8 +257,8 @@ SELECT l_orderkey, max(l_shipdate) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | max -------------+------------ + l_orderkey | max +--------------------------------------------------------------------- 4678 | 11-27-1998 12384 | 11-26-1998 1124 | 11-25-1998 @@ -273,8 +273,8 @@ SELECT GROUP BY l_linestatus, l_orderkey ORDER BY 3 DESC, 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_linestatus | l_orderkey | max ---------------+------------+------------ + l_linestatus | l_orderkey | max +--------------------------------------------------------------------- O | 4678 | 11-27-1998 O | 12384 | 11-26-1998 O | 1124 | 11-25-1998 @@ -288,8 +288,8 @@ SELECT FROM lineitem_hash GROUP BY l_linestatus, l_shipmode ORDER BY 3 DESC, 1, 2 LIMIT 5; - l_linestatus | l_shipmode | max ---------------+------------+------------ + l_linestatus | l_shipmode | max +--------------------------------------------------------------------- O | AIR | 11-27-1998 O | RAIL | 11-26-1998 O | SHIP | 11-21-1998 @@ -305,8 +305,8 @@ SELECT ORDER BY l_orderkey, l_linenumber LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | l_linenumber -------------+-------------- + l_orderkey | l_linenumber +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -321,8 +321,8 @@ SELECT 
GROUP BY l_orderkey, l_linenumber ORDER BY l_linenumber, l_orderkey LIMIT 5; - l_orderkey | l_linenumber -------------+-------------- + l_orderkey | l_linenumber +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -340,8 +340,8 @@ SELECT ORDER BY l_linenumber, l_orderkey LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | l_linenumber -------------+-------------- + l_orderkey | l_linenumber +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -359,8 +359,8 @@ SELECT GROUP BY l_orderkey, (1+1), l_linenumber ORDER BY l_linenumber, (1+1), l_orderkey LIMIT 5; - l_orderkey | l_linenumber -------------+-------------- + l_orderkey | l_linenumber +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -377,8 +377,8 @@ SELECT GROUP BY l_orderkey, l_linenumber ORDER BY l_linenumber, l_orderkey LIMIT 5; - l_orderkey | l_linenumber -------------+-------------- + l_orderkey | l_linenumber +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -396,8 +396,8 @@ SELECT ORDER BY l_orderkey + 1 LIMIT 5; DEBUG: push down of limit count: 5 - ?column? ----------- + ?column? +--------------------------------------------------------------------- 2 3 4 @@ -414,8 +414,8 @@ SELECT GROUP BY l_orderkey + 1 ORDER BY l_orderkey + 1 , 2 LIMIT 5; - ?column? | count -----------+------- + ?column? | count +--------------------------------------------------------------------- 2 | 6 3 | 1 4 | 6 @@ -432,8 +432,8 @@ SELECT ORDER BY l_orderkey , 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 1 | 6 2 | 1 3 | 6 @@ -449,8 +449,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 LIMIT 2; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 7 | 7 1 | 6 (2 rows) @@ -463,8 +463,8 @@ SELECT ORDER BY l_orderkey , 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | rank -------------+------ + l_orderkey | rank +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -482,8 +482,8 @@ SELECT GROUP BY l_orderkey ORDER BY l_orderkey , 3, 2 LIMIT 5; - l_orderkey | count | rank -------------+-------+------ + l_orderkey | count | rank +--------------------------------------------------------------------- 1 | 6 | 1 2 | 1 | 1 3 | 6 | 1 @@ -498,8 +498,8 @@ SELECT GROUP BY l_orderkey, l_linenumber ORDER BY l_orderkey , count(*) OVER (partition by l_orderkey), count(*), l_linenumber LIMIT 5; - l_orderkey | l_linenumber | count | count -------------+--------------+-------+------- + l_orderkey | l_linenumber | count | count +--------------------------------------------------------------------- 1 | 1 | 1 | 6 2 | 1 | 1 | 1 3 | 1 | 1 | 6 @@ -512,10 +512,10 @@ SELECT DISTINCT ON (RANK() OVER (partition by l_orderkey)) l_orderkey, RANK() OVER (partition by l_orderkey) FROM lineitem_hash GROUP BY l_orderkey - ORDER BY 2 DESC, 1 + ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | rank -------------+------ + l_orderkey | rank +--------------------------------------------------------------------- 1 | 1 (1 row) diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index 864a5df6d..1855182a2 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ 
b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -9,8 +9,8 @@ SET client_min_messages TO DEBUG1; SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; - l_partkey | aggregate ------------+------------ + l_partkey | aggregate +--------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 @@ -29,8 +29,8 @@ SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; DEBUG: push down of limit count: 600 - l_partkey | aggregate ------------+------------ + l_partkey | aggregate +--------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 @@ -51,8 +51,8 @@ SELECT c_custkey, c_name, count(*) as lineitem_count WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; - c_custkey | c_name | lineitem_count ------------+--------------------+---------------- + c_custkey | c_name | lineitem_count +--------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 79 | Customer#000000079 | 38 @@ -73,8 +73,8 @@ SELECT c_custkey, c_name, count(*) as lineitem_count GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; DEBUG: push down of limit count: 150 - c_custkey | c_name | lineitem_count ------------+--------------------+---------------- + c_custkey | c_name | lineitem_count +--------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 79 | Customer#000000079 | 38 @@ -93,8 +93,8 @@ DEBUG: push down of limit count: 150 SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem GROUP BY l_partkey ORDER BY average DESC, l_partkey LIMIT 10; - l_partkey | average ------------+----------------------- + l_partkey | average +--------------------------------------------------------------------- 9998 | 9999.0000000000000000 102466 | 9997.0000000000000000 184959 | 9996.0000000000000000 @@ -112,8 +112,8 @@ SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem GROUP BY l_partkey ORDER BY complex_expression DESC LIMIT 10; - l_partkey | complex_expression ------------+-------------------- + l_partkey | complex_expression +--------------------------------------------------------------------- 160895 | 22816 194541 | 19160 37018 | 19044 @@ -130,8 +130,8 @@ SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 10.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; - count_quantity | l_quantity -----------------+------------ + count_quantity | l_quantity +--------------------------------------------------------------------- 227 | 3.00 232 | 7.00 237 | 2.00 diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out index 0381679aa..d38a5865d 100644 --- a/src/test/regress/expected/multi_master_protocol.out +++ b/src/test/regress/expected/multi_master_protocol.out @@ -5,29 +5,29 @@ SET citus.next_shard_id TO 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); - 
part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------+------------+--------------------+---------------+----------------------- + part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy +--------------------------------------------------------------------- t | l_orderkey | 2 | 1536000 | 2 (1 row) SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; - master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + master_get_table_ddl_events +--------------------------------------------------------------------- ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) ALTER TABLE public.lineitem OWNER TO postgres - CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default + CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) (4 rows) SELECT * FROM master_get_new_shardid(); - master_get_new_shardid ------------------------- + master_get_new_shardid +--------------------------------------------------------------------- 740000 (1 row) SELECT * FROM master_get_active_worker_nodes(); - node_name | node_port ------------+----------- + node_name | node_port +--------------------------------------------------------------------- localhost | 57638 localhost | 57637 (2 rows) diff --git a/src/test/regress/expected/multi_metadata_access.out b/src/test/regress/expected/multi_metadata_access.out index fa6e09250..ed11ff12b 100644 --- a/src/test/regress/expected/multi_metadata_access.out +++ b/src/test/regress/expected/multi_metadata_access.out @@ -18,8 +18,8 @@ WHERE AND ext.extname = 'citus' AND nsp.nspname = 'pg_catalog' AND NOT has_table_privilege(pg_class.oid, 'select'); - oid ------------------- + oid +--------------------------------------------------------------------- pg_dist_authinfo (1 row) diff --git a/src/test/regress/expected/multi_metadata_attributes.out b/src/test/regress/expected/multi_metadata_attributes.out index abcef2178..91d927c18 100644 --- a/src/test/regress/expected/multi_metadata_attributes.out +++ b/src/test/regress/expected/multi_metadata_attributes.out @@ -8,7 +8,7 @@ SELECT attrelid::regclass, attname, atthasmissing, attmissingval FROM pg_attribute WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass) ORDER BY attrelid, attname; - attrelid | attname | atthasmissing | attmissingval 
-----------+---------+---------------+--------------- + attrelid | attname | atthasmissing | attmissingval +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 72cc80e1d..ad27cf1a3 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -18,15 +18,15 @@ COMMENT ON FUNCTION master_metadata_snapshot() IS 'commands to create the metadata snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; - logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- (0 rows) -- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- pg_dist_node entries and reference tables SELECT unnest(master_metadata_snapshot()) order by 1; - unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + unnest +--------------------------------------------------------------------- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition TRUNCATE pg_dist_node CASCADE @@ -35,15 +35,15 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Create a test table with constraints and SERIAL CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL); SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('mx_test_table', 8, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- Set the replication model of the test table to streaming replication so that it is @@ -51,8 +51,8 @@ SELECT master_create_worker_shards('mx_test_table', 8, 1); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; -- Show that the created MX table is included in the metadata snapshot SELECT unnest(master_metadata_snapshot()) order by 1; - unnest 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + unnest +--------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres @@ -71,8 +71,8 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that CREATE INDEX commands are included in the metadata snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(master_metadata_snapshot()) order by 1; - unnest --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + unnest +--------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres @@ -95,8 +95,8 @@ ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes HINT: Connect to worker nodes directly to manually change schemas of affected objects. 
SELECT unnest(master_metadata_snapshot()) order by 1; - unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + unnest +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -116,15 +116,15 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that append distributed tables are not included in the metadata snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; - unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + unnest +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -144,8 +144,8 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that range distributed tables are not included in the metadata snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; - unnest 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + unnest +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -165,74 +165,74 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Test start_metadata_sync_to_node UDF -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Ensure it works when run on a secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 4 (1 row) SELECT start_metadata_sync_to_node('localhost', 8888); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', 8888); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) -- Add a node to another cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); - master_add_secondary_node ---------------------------- + master_add_secondary_node +--------------------------------------------------------------------- 5 (1 row) -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; - nodeid | hasmetadata ---------+------------- + nodeid | hasmetadata +--------------------------------------------------------------------- 1 | t (1 row) 
-- Check that the metadata has been copied to the worker \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; - groupid ---------- + groupid +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+----------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | f | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t @@ -240,14 +240,14 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel ----------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+---------------+--------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 @@ -259,8 +259,8 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 @@ -272,37 +272,37 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers ---------+---------+--------------------------------------------------------------------------------- - col_1 | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 
'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_2 | text | col_2 (1 row) -- Check that pg_dist_colocation is not synced SELECT * FROM pg_dist_colocation ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -317,29 +317,29 @@ CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE( CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- Check that foreign key metadata exists on the worker \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; - Constraint | Definition ---------------------------+----------------------------------------------------------------------------- - fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) + Constraint | Definition +--------------------------------------------------------------------- + fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) (1 row) \c - - - :master_port @@ -350,27 +350,27 @@ RESET citus.replication_model; -- Check that repeated calls to start_metadata_sync_to_node has no side effects \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node 
+--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; - groupid ---------- + groupid +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+----------------+----------------+------------------ + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t @@ -378,14 +378,14 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel ----------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+---------------+--------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 @@ -397,8 +397,8 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 @@ -410,30 +410,30 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers ---------+---------+--------------------------------------------------------------------------------- - col_1 | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 
'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_2 | text | col_2 (1 row) SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -444,8 +444,8 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: start_metadata_sync_to_node cannot run inside a transaction block ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) @@ -454,21 +454,21 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- s (1 row) @@ -479,8 +479,8 @@ INSERT INTO mx_query_test VALUES (4, 'four', 16); INSERT INTO mx_query_test VALUES (5, 'five', 24); \c - - - :worker_1_port SELECT * FROM mx_query_test ORDER BY a; - a | b | c ----+-------+---- + a | b | c +--------------------------------------------------------------------- 1 | one | 1 2 | two | 4 3 | three | 9 @@ -492,8 +492,8 @@ INSERT INTO mx_query_test VALUES (6, 'six', 36); UPDATE mx_query_test SET c = 25 WHERE a = 5; \c - - - :master_port SELECT * FROM mx_query_test ORDER BY a; - a | b | c ----+-------+---- + a | b | c +--------------------------------------------------------------------- 1 | one | 1 2 | two | 4 3 | three | 9 @@ -507,34 +507,34 @@ DROP TABLE mx_query_test; -- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - 
hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) -- Test DDL propagation in MX tables SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SET citus.shard_count = 5; @@ -550,56 +550,56 @@ CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col1 | integer | - col2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col1 | integer | + col2 | text | (2 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_index_1'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col1 | integer | - col2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col1 | integer | + col2 | text | (2 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_2'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col2 | text | col2 (1 row) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Constraint | Definition -------------------+----------------------------------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) (1 row) SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Check that created tables are marked as streaming replicated tables @@ -612,8 +612,8 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid; - logicalrelid | repmodel ------------------------------+---------- + logicalrelid | repmodel +--------------------------------------------------------------------- 
mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) @@ -628,8 +628,8 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; - logicalrelid | shardid | nodename | nodeport ------------------------------+---------+-----------+---------- + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 @@ -647,8 +647,8 @@ ORDER BY -- Check that tables are created \dt mx_test_schema_?.mx_table_? List of relations - Schema | Name | Type | Owner -------------------+------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_test_schema_1 | mx_table_1 | table | postgres mx_test_schema_2 | mx_table_2 | table | postgres (2 rows) @@ -661,8 +661,8 @@ FROM WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass; - logicalrelid | repmodel ------------------------------+---------- + logicalrelid | repmodel +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) @@ -677,8 +677,8 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; - logicalrelid | shardid | nodename | nodeport ------------------------------+---------+-----------+---------- + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 @@ -696,18 +696,18 @@ ORDER BY \d mx_test_schema_1.mx_table_1 \d mx_test_schema_2.mx_table_2 SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- (0 rows) -- Check that CREATE INDEX statement is propagated @@ -719,15 +719,15 @@ ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQU \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_3'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 
'mx_test_schema_2.mx_table_2_col1_key'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col1 | integer | col1 (1 row) @@ -739,8 +739,6 @@ DROP INDEX mx_test_schema_2.mx_index_3; SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_3'::regclass; ERROR: relation "mx_test_schema_2.mx_index_3" does not exist -LINE 2: relid = 'mx_test_schema_2.mx_index_3'::regclass; - ^ -- Check that ALTER TABLE statements are propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; @@ -756,16 +754,16 @@ REFERENCES mx_test_schema_2.mx_table_2(col1); \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col1 | integer | - col2 | text | - col3 | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + col1 | integer | + col2 | text | + col3 | integer | (3 rows) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition -------------------+----------------------------------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -784,8 +782,8 @@ REFERENCES NOT VALID; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition ---------------------+----------------------------------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -798,16 +796,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_colocation_test_1 (a int); SELECT create_distributed_table('mx_colocation_test_1', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_colocation_test_2 (a int); SELECT create_distributed_table('mx_colocation_test_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Check the colocation IDs of the created tables @@ -819,8 +817,8 @@ WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass ORDER BY logicalrelid; - logicalrelid | colocationid -----------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- mx_colocation_test_1 | 10000 mx_colocation_test_2 | 10000 (2 rows) @@ -843,9 +841,9 @@ WHERE OR logicalrelid = 'mx_colocation_test_2'::regclass; -- Mark tables colocated and see the changes on the master and the worker SELECT mark_tables_colocated('mx_colocation_test_1', ARRAY['mx_colocation_test_2']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) SELECT @@ -855,8 +853,8 @@ FROM WHERE logicalrelid = 
'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; - logicalrelid | colocationid -----------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) @@ -869,8 +867,8 @@ FROM WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; - logicalrelid | colocationid -----------------------+-------------- + logicalrelid | colocationid +--------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) @@ -891,28 +889,28 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel --------------------+---------- + logicalrelid | repmodel +--------------------------------------------------------------------- mx_temp_drop_test | s (1 row) DROP TABLE mx_temp_drop_test; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel --------------------+---------- + logicalrelid | repmodel +--------------------------------------------------------------------- mx_temp_drop_test | s (1 row) @@ -923,38 +921,38 @@ SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- sync table with serial column after create_distributed_table CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) DROP TABLE mx_table_with_small_sequence; -- Show that create_distributed_table works with a serial column CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) INSERT INTO mx_table_with_small_sequence VALUES (0); @@ -966,116 +964,116 @@ SET citus.replication_model TO 'streaming'; -- Create an MX table with (BIGSERIAL) sequences CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); SELECT create_distributed_table('mx_table_with_sequence', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- - a | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences created on the metadata worker as well \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- - a | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences on the worker have their own space SELECT nextval('mx_table_with_sequence_b_seq'); - nextval ------------------ + nextval +--------------------------------------------------------------------- 281474976710657 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); - nextval ------------------ + nextval +--------------------------------------------------------------------- 281474976710657 (1 row) -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - 
start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) \c - - - :worker_2_port SELECT groupid FROM pg_dist_local_group; - groupid ---------- + groupid +--------------------------------------------------------------------- 2 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- - a | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------------------------------+----------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) SELECT nextval('mx_table_with_sequence_b_seq'); - nextval ------------------ + nextval +--------------------------------------------------------------------- 562949953421313 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); - nextval ------------------ + nextval +--------------------------------------------------------------------- 562949953421313 (1 row) @@ -1084,8 +1082,8 @@ INSERT INTO mx_table_with_small_sequence VALUES (2), (4); \c - - - :master_port -- check our small sequence values SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c; - a | b | c ----+-----------+------ + a | b | c +--------------------------------------------------------------------- 0 | 1 | 1 1 | 268435457 | 4097 2 | 536870913 | 8193 @@ -1098,14 +1096,14 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) -- Check that the sequences are dropped from the workers @@ -1113,28 +1111,28 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) -- Check that the sequences are dropped from the workers \c - - - :worker_2_port \ds mx_table_with_sequence_b_seq List of relations - 
Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) -- Check that MX sequences play well with non-super users @@ -1148,9 +1146,9 @@ DELETE FROM pg_dist_placement; DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- the master user needs superuser permissions to change the replication model @@ -1171,52 +1169,52 @@ CREATE TABLE mx_table (a int, b BIGSERIAL); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); - master_add_node ------------------ + master_add_node +--------------------------------------------------------------------- 6 (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); - nextval ------------------ + nextval +--------------------------------------------------------------------- 281474976710657 (1 row) INSERT INTO mx_table (a) VALUES (37); INSERT INTO mx_table (a) VALUES (38); SELECT * FROM mx_table ORDER BY a; - a | b -----+----------------- + a | b +--------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 (2 rows) \c - mx_user - :worker_2_port SELECT nextval('mx_table_b_seq'); - nextval ------------------- + nextval +--------------------------------------------------------------------- 1125899906842625 (1 row) INSERT INTO mx_table (a) VALUES (39); INSERT INTO mx_table (a) VALUES (40); SELECT * FROM mx_table ORDER BY a; - a | b -----+------------------ + a | b +--------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 39 | 1125899906842626 @@ -1244,9 +1242,9 @@ UPDATE pg_dist_placement WHERE groupid = :old_worker_2_group; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) DROP USER mx_user; @@ -1258,31 +1256,31 @@ DROP USER mx_user; \c - - - :master_port CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- make sure that adding/removing nodes doesn't cause -- multiple colocation entries for reference tables SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; - count 
-------- + count +--------------------------------------------------------------------- 1 (1 row) \dt mx_ref List of relations - Schema | Name | Type | Owner ---------+--------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) \c - - - :worker_1_port \dt mx_ref List of relations - Schema | Name | Type | Owner ---------+--------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) @@ -1296,8 +1294,8 @@ WHERE logicalrelid = 'mx_ref'::regclass ORDER BY nodeport; - logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport ---------------+------------+----------+---------+-------------+-----------+---------- + logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport +--------------------------------------------------------------------- mx_ref | n | t | 1310072 | 100072 | localhost | 57637 mx_ref | n | t | 1310072 | 100073 | localhost | 57638 (2 rows) @@ -1308,33 +1306,33 @@ SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_re ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; CREATE INDEX mx_ref_index ON mx_ref(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | numeric | default 0 (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | numeric | default 0 (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; - Column | Type | Definition ---------+---------+------------ + Column | Type | Definition +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) @@ -1344,22 +1342,18 @@ DROP TABLE mx_ref; SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; ERROR: relation "mx_ref_index" does not exist -LINE 2: relid = 'mx_ref_index'::regclass; - ^ \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; ERROR: relation "mx_ref_index" does not exist -LINE 2: relid = 'mx_ref_index'::regclass; - ^ SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; - shardid | shardstate | 
shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- (0 rows) -- Check that master_add_node propagates the metadata about new placements of a reference table @@ -1371,23 +1365,23 @@ CREATE TABLE tmp_placement AS DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport ----------+-----------+---------- + shardid | nodename | nodeport +--------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) @@ -1395,16 +1389,16 @@ WHERE logicalrelid='mx_ref'::regclass; SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport ----------+-----------+---------- + shardid | nodename | nodeport +--------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) \c - - - :master_port SELECT master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "mx_ref" to the node localhost:57638 - master_add_node ------------------ +NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx + master_add_node +--------------------------------------------------------------------- 7 (1 row) @@ -1412,8 +1406,8 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; - shardid | nodename | nodeport ----------+-----------+---------- + shardid | nodename | nodeport +--------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 (2 rows) @@ -1423,8 +1417,8 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; - shardid | nodename | nodeport ----------+-----------+---------- + shardid | nodename | nodeport +--------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 (2 rows) @@ -1443,57 +1437,57 @@ UPDATE pg_dist_placement -- Confirm that shouldhaveshards is 'true' \c - - - :master_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- t (1 row) -- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes \c - - - :master_port SELECT * from 
master_set_node_property('localhost', 8888, 'shouldhaveshards', false); - master_set_node_property --------------------------- - + master_set_node_property +--------------------------------------------------------------------- + (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- f (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- f (1 row) -- Check that setting shouldhaveshards to true is correctly transferred to other mx nodes \c - postgres - :master_port SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true); - master_set_node_property --------------------------- - + master_set_node_property +--------------------------------------------------------------------- + (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ------------------- + shouldhaveshards +--------------------------------------------------------------------- t (1 row) @@ -1505,8 +1499,8 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; ALTER SYSTEM SET citus.metadata_sync_interval TO 300000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -1514,79 +1508,79 @@ SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table_1(a int); SELECT create_distributed_table('dist_table_1', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port; SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata | metadatasynced --------------+---------------- + hasmetadata | metadatasynced +--------------------------------------------------------------------- t | f (1 row) CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT create_reference_table('dist_table_2'); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. ALTER TABLE dist_table_1 ADD COLUMN b int; -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. 
SELECT master_add_node('localhost', :master_port, groupid => 0); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT master_disable_node('localhost', :worker_1_port); -ERROR: Disabling localhost:57637 failed -DETAIL: localhost:57637 is a metadata node, but is out of sync +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. SELECT master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:57638 failed -DETAIL: localhost:57637 is a metadata node, but is out of sync +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. SELECT master_remove_node('localhost', :worker_1_port); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT master_remove_node('localhost', :worker_2_port); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. -- master_update_node should succeed SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; -- Cleanup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) DROP TABLE mx_test_schema_2.mx_table_2 CASCADE; diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 8188853f8..92b3de6f0 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -26,32 +26,32 @@ CREATE TABLE range_partitioned ( LIKE limit_orders ); CREATE TABLE append_partitioned ( LIKE limit_orders ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders', 'id', 'hash'); - create_distributed_table --------------------------- - + 
create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('multiple_hash', 'id', 'hash'); ERROR: column "id" of relation "multiple_hash" does not exist SELECT create_distributed_table('range_partitioned', 'id', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('append_partitioned', 'id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; -- make a single shard that covers no partition values SELECT create_distributed_table('insufficient_shards', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0 @@ -78,15 +78,15 @@ WHERE shardid = :new_shard_id; INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 32743; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- basic single-row INSERT with RETURNING INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -106,16 +106,16 @@ SET client_min_messages TO 'DEBUG2'; SELECT * FROM range_partitioned WHERE id = 32743; DEBUG: Creating router plan DEBUG: Plan is router executable - id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) SELECT * FROM append_partitioned WHERE id = 414123; DEBUG: Router planner does not support append-partitioned tables. DEBUG: Plan is router executable - id | symbol | bidder_id | placed_at | kind | limit_price ---------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 414123 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -134,8 +134,8 @@ HINT: Make sure the value for partition column "id" falls into a single shard. 
INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT COUNT(*) FROM limit_orders WHERE id = 12756; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -143,8 +143,8 @@ SELECT COUNT(*) FROM limit_orders WHERE id = 12756; INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT COUNT(*) FROM limit_orders WHERE id = 430; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -188,8 +188,8 @@ INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'bu (12038, 'GOOG', 5634, '2001-04-17 03:37:28', 'buy', 2.50), (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -198,16 +198,16 @@ INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50), (22038, 'GOOG', 5634, now(), 'buy', 2.50), (22039, 'GOOG', 5634, now(), 'buy', 1.50) RETURNING id; - id -------- + id +--------------------------------------------------------------------- 22037 22038 22039 (3 rows) SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -216,8 +216,8 @@ INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'bu (random() * 10 + 80000, 'GOOG', 5634, now(), 'buy', 2.50), (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -227,43 +227,43 @@ INSERT INTO limit_orders SELECT * FROM deleted_orders; -- test simple DELETE INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = 246; SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders WHERE id = 430 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders WHERE id = 430; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- DELETE with expression in WHERE clause INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -283,53 +283,53 @@ DELETE FROM limit_orders 
RETURNING id / 0; ERROR: division by zero \set VERBOSITY default SELECT * FROM limit_orders WHERE id = 412; - id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+-----------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- (0 rows) INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); -- simple UPDATE UPDATE limit_orders SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders WHERE id = 246; - symbol --------- + symbol +--------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- expression UPDATE UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders WHERE id = 246; - bidder_id ------------ + bidder_id +--------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- multi-column UPDATE UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders WHERE id = 246; - kind | limit_price -------+------------- + kind | limit_price +--------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -358,22 +358,22 @@ ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- or the insert succeeded and placement marked unhealthy \c - - - :worker_1_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) \c - - - :worker_2_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) \c - - - :master_port SELECT count(*) FROM limit_orders WHERE id = 276; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -383,8 +383,8 @@ FROM pg_dist_shard_placement AS sp, WHERE sp.shardid = s.shardid AND sp.shardstate = 3 AND s.logicalrelid = 'limit_orders'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -409,8 +409,8 @@ AND sp.nodename = 
'localhost' AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -440,8 +440,8 @@ ERROR: relation bidders is not distributed WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43)) UPDATE limit_orders SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; - symbol | bidder_id ---------+----------- + symbol | bidder_id +--------------------------------------------------------------------- GM | 30 (1 row) @@ -452,15 +452,15 @@ UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; - symbol | bidder_id ---------+----------- + symbol | bidder_id +--------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; - id | lower | symbol ------+-------+-------- + id | lower | symbol +--------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -489,8 +489,8 @@ UPDATE limit_orders SET array_of_values = stable_append(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders WHERE id = 246; - array_of_values ------------------ + array_of_values +--------------------------------------------------------------------- {1,2} (1 row) @@ -502,8 +502,8 @@ UPDATE limit_orders SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint \set VERBOSITY default SELECT array_of_values FROM limit_orders WHERE id = 246; - array_of_values ------------------ + array_of_values +--------------------------------------------------------------------- {1,2} (1 row) @@ -519,8 +519,8 @@ INSERT INTO multiple_hash VALUES ('0', '4'); INSERT INTO multiple_hash VALUES ('0', '5'); INSERT INTO multiple_hash VALUES ('0', '6'); UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -530,8 +530,8 @@ UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; (6 rows) DELETE FROM multiple_hash WHERE category = '0' RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -555,8 +555,8 @@ INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -570,8 +570,8 @@ UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category; - category ----------- + category +--------------------------------------------------------------------- 1 1 1 @@ -580,8 +580,8 @@ UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING categ UPDATE 3 -- 
check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; - category | data -----------+--------- + category | data +--------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 @@ -596,8 +596,8 @@ DELETE FROM multiple_hash WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash WHERE category = '1' RETURNING category; - category ----------- + category +--------------------------------------------------------------------- 1 1 1 @@ -606,13 +606,13 @@ DELETE FROM multiple_hash WHERE category = '1' RETURNING category; DELETE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; - category | data -----------+------ + category | data +--------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; - category | data -----------+------ + category | data +--------------------------------------------------------------------- (0 rows) -- verify interaction of default values, SERIAL, and RETURNING @@ -620,26 +620,26 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SET citus.shard_count TO 4; SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id ----- + id +--------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 3 | 103 | Mynt (1 row) @@ -647,42 +647,42 @@ DROP TABLE app_analytics_events; -- again with serial in the partition column CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id ----- + id +--------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 3 | 103 | Mynt (1 row) -- Test multi-row insert with serial in the partition column INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 4 | 104 | Wayz 5 | 105 | Mynt (2 rows) INSERT INTO app_analytics_events (id, name) VALUES 
(DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; - id | app_id | name ------+--------+------ + id | app_id | name +--------------------------------------------------------------------- 6 | | Foo 300 | | Wah (2 rows) @@ -691,50 +691,50 @@ PREPARE prep(varchar) AS INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; EXECUTE prep('version-1'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 7 | | version-1.1 400 | | version-1.2 (2 rows) EXECUTE prep('version-2'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 8 | | version-2.1 400 | | version-2.2 (2 rows) EXECUTE prep('version-3'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 9 | | version-3.1 400 | | version-3.2 (2 rows) EXECUTE prep('version-4'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 10 | | version-4.1 400 | | version-4.2 (2 rows) EXECUTE prep('version-5'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 11 | | version-5.1 400 | | version-5.2 (2 rows) EXECUTE prep('version-6'); - id | app_id | name ------+--------+------------- + id | app_id | name +--------------------------------------------------------------------- 12 | | version-6.1 400 | | version-6.2 (2 rows) SELECT * FROM app_analytics_events ORDER BY id, name; - id | app_id | name ------+--------+----------------- + id | app_id | name +--------------------------------------------------------------------- 1 | 101 | Fauxkemon Geaux 2 | 102 | Wayz 3 | 103 | Mynt @@ -761,15 +761,15 @@ TRUNCATE app_analytics_events; ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; - id | name -----+------ + id | name +--------------------------------------------------------------------- 13 | Wayz 14 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | name -----+------ + id | name +--------------------------------------------------------------------- 13 | Wayz 14 | Mynt (2 rows) @@ -778,23 +778,23 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with a dropped column before the partition column CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; - id | name -----+------ + id | name +--------------------------------------------------------------------- 3 | Mynt 3 | Wayz (2 rows) SELECT * FROM app_analytics_events WHERE name = 'Wayz'; - id | name -----+------ + id | name +--------------------------------------------------------------------- 3 | Wayz (1 row) @@ -802,22 +802,22 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a reference table CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT 
create_reference_table('app_analytics_events'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) @@ -826,22 +826,22 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a non-partition column CREATE TABLE app_analytics_events (id int, app_id serial, name text); SELECT create_distributed_table('app_analytics_events', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO app_analytics_events (id, name) VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; - name | app_id -------+-------- + name | app_id +--------------------------------------------------------------------- Mynt | 2 Wayz | 1 (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | app_id | name -----+--------+------ + id | app_id | name +--------------------------------------------------------------------- 98 | 2 | Mynt 99 | 1 | Wayz (2 rows) @@ -856,15 +856,15 @@ CREATE TABLE summary_table ( count int, uniques int); SELECT create_distributed_table('raw_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('summary_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO raw_table VALUES (1, 100); @@ -883,10 +883,10 @@ UPDATE summary_table SET uniques = 0 WHERE false; UPDATE summary_table SET uniques = 0 WHERE null; UPDATE summary_table SET uniques = 0 WHERE null > jsonb_build_array(); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- - 1 | | | | - 2 | | | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | | | + 2 | | | | (2 rows) UPDATE summary_table SET average_value = average_query.average FROM ( @@ -894,10 +894,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | | 200.0000000000000000 | | - 2 | | | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | 200.0000000000000000 | | + 2 | | | | (2 rows) -- try different syntax @@ -905,39 +905,39 @@ UPDATE summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | | 
200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) UPDATE summary_table SET min_value = 100 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- indeed, we don't need filter on UPDATE explicitly if SELECT already prunes to one shard UPDATE summary_table SET uniques = 2 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200)); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- use inner results for non-partition column UPDATE summary_table SET uniques = NULL WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- these should not update anything @@ -950,10 +950,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1 AND id = 4; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- update with NULL value @@ -962,10 +962,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | 100 | | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | 100 | | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- multi-shard updates with recursively planned subqueries @@ -994,20 +994,20 @@ WHERE summary_table.id = metrics.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 
| 100 | | 4 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test joins UPDATE summary_table SET count = count + 1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | | 5 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with prepared statements @@ -1022,10 +1022,10 @@ EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | | 65 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with reference tables @@ -1037,23 +1037,23 @@ CREATE TABLE reference_summary_table ( count int, uniques int); SELECT create_reference_table('reference_raw_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('reference_summary_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO reference_raw_table VALUES (1, 100); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 300 2 | 400 2 | 500 @@ -1062,10 +1062,10 @@ INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; INSERT INTO reference_summary_table VALUES (1); INSERT INTO reference_summary_table VALUES (2); SELECT * FROM reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- - 1 | | | | - 2 | | | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | | | + 2 | | | | (2 rows) UPDATE reference_summary_table SET average_value = average_query.average FROM ( @@ -1081,10 +1081,10 @@ UPDATE reference_summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM reference_raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- no need partition colum equalities on reference tables @@ -1092,10 +1092,10 @@ UPDATE reference_summary_table SET (count) = (SELECT id AS inner_id FROM reference_raw_table WHERE value = 500) WHERE min_value = 400; SELECT * FROM 
reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 1 | | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | 2 | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | 2 | (2 rows) -- can read from a reference table and update a distributed table @@ -1125,11 +1125,11 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 3; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | (3 rows) -- COPY on UPDATE part @@ -1143,12 +1143,12 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 4; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | (4 rows) -- COPY on both part @@ -1161,13 +1161,13 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 5; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | (5 rows) -- COPY on reference tables @@ -1180,20 +1180,20 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 6; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (6 rows) -- test DELETE queries SELECT * FROM raw_table ORDER BY id, value; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 100 1 | 200 1 | 200 @@ -1211,25 +1211,25 @@ SELECT * FROM raw_table ORDER BY id, value; DELETE FROM 
summary_table WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (5 rows) -- test with different syntax DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 2; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- cannot read from a distributed table and delete from a reference table @@ -1237,12 +1237,12 @@ DELETE FROM reference_summary_table USING raw_table WHERE reference_summary_table.id = raw_table.id AND raw_table.id = 3; ERROR: cannot perform select on a distributed table and modify a reference table SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- test connection API via using COPY with DELETEs @@ -1254,12 +1254,12 @@ DELETE FROM summary_table USING reference_raw_table WHERE summary_table.id = reference_raw_table.id AND reference_raw_table.id = 2; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- test DELETEs with prepared statements @@ -1275,8 +1275,8 @@ EXECUTE prepared_delete_with_join(4); EXECUTE prepared_delete_with_join(5); EXECUTE prepared_delete_with_join(6); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- + id | min_value | average_value | count | uniques +--------------------------------------------------------------------- (0 rows) -- we don't support subqueries in VALUES clause diff --git 
a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index cb6f1d756..7af0de8d9 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -13,27 +13,27 @@ CREATE TABLE labs ( name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('researchers', 2, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT master_create_distributed_table('labs', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('labs', 1, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- might be confusing to have two people in the same lab with the same name @@ -49,8 +49,8 @@ DELETE FROM researchers WHERE lab_id = 1 AND id = 2; INSERT INTO researchers VALUES (2, 1, 'John Backus'), (12, 1, 'Frances E. Allen'); COMMIT; SELECT name FROM researchers WHERE lab_id = 1 AND id % 10 = 2; - name ------------------- + name +--------------------------------------------------------------------- John Backus Frances E. Allen (2 rows) @@ -62,8 +62,8 @@ DELETE FROM researchers WHERE id = 14 AND lab_id = 2; ROLLBACK; -- should have rolled everything back SELECT * FROM researchers WHERE id = 15 AND lab_id = 2; - id | lab_id | name -----+--------+------ + id | lab_id | name +--------------------------------------------------------------------- (0 rows) -- abort a modification @@ -71,8 +71,8 @@ BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers WHERE lab_id = 1 AND id = 1; - name --------------- + name +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -90,8 +90,8 @@ SAVEPOINT hire_thompson; INSERT INTO researchers VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers WHERE lab_id = 3 AND id = 6; - name --------------- + name +--------------------------------------------------------------------- Ken Thompson (1 row) @@ -114,8 +114,8 @@ INSERT INTO researchers VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers WHERE lab_id = 4; - name ----------- + name +--------------------------------------------------------------------- Jim Gray (1 row) @@ -137,8 +137,8 @@ INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id AND researchers.lab_id = 5; - id | lab_id | name | id | name -----+--------+-------------------+----+------------ + id | lab_id | name | id | name +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -153,15 +153,15 @@ INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); ERROR: duplicate key value violates unique constraint 
"avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, Leslie Lamport) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx ABORT; -- SELECTs may occur after a modification: First check that selecting -- from the modified node works. BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -176,8 +176,8 @@ AND sp.nodeport = :worker_1_port AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -200,15 +200,15 @@ INSERT INTO labs VALUES (6, 'Bell Labs'); ABORT; -- but the DDL should correctly roll back SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass; - Column | Type | Modifiers ---------+--------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- id | bigint | not null name | text | not null (2 rows) SELECT * FROM labs WHERE id = 6; - id | name -----+----------- + id | name +--------------------------------------------------------------------- 6 | Bell Labs (1 row) @@ -243,8 +243,8 @@ ROLLBACK; BEGIN; \copy labs from stdin delimiter ',' SELECT name FROM labs WHERE id = 10; - name ----------------- + name +--------------------------------------------------------------------- Weyland-Yutani Weyland-Yutani (2 rows) @@ -257,16 +257,16 @@ BEGIN; \copy labs from stdin delimiter ',' COMMIT; SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; - name ----------------- + name +--------------------------------------------------------------------- Planet Express fsociety (2 rows) -- 1pc failure test SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -279,22 +279,22 @@ DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- 2pc failure and success tests SET citus.multi_shard_commit_protocol TO '2pc'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -307,14 +307,14 @@ DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. 
COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -324,8 +324,8 @@ BEGIN; COMMIT; -- verify success SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name -----+--------+---------------------- + id | lab_id | name +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' @@ -333,8 +333,8 @@ SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -350,8 +350,8 @@ SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS END; $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; - nodename | nodeport | success | result ------------+----------+---------+----------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) @@ -359,8 +359,8 @@ ORDER BY nodeport; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; - nodename | nodeport | shardid | success | result ------------+----------+---------+---------+---------------- + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- localhost | 57637 | 1200000 | t | CREATE TRIGGER localhost | 57637 | 1200001 | t | CREATE TRIGGER localhost | 57638 | 1200000 | t | CREATE TRIGGER @@ -377,16 +377,16 @@ DELETE FROM researchers WHERE lab_id = 6; \copy researchers FROM STDIN delimiter ',' COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 -WARNING: could not commit transaction for shard 1200001 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node \unset VERBOSITY -- verify everyhing including delete is rolled back SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name -----+--------+---------------------- + id | lab_id | name +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' @@ -395,8 +395,8 @@ SELECT * FROM researchers WHERE lab_id = 6; -- cleanup triggers and the function SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; - nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------------- + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- localhost | 57637 | 1200000 | t | DROP TRIGGER localhost | 57637 | 
1200001 | t | DROP TRIGGER localhost | 57638 | 1200000 | t | DROP TRIGGER @@ -405,8 +405,8 @@ ORDER BY nodeport, shardid; SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; - nodename | nodeport | success | result ------------+----------+---------+--------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) @@ -423,13 +423,13 @@ ABORT; -- can perform parallel DDL even a connection is used for multiple shards BEGIN; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; - lab_id --------- + lab_id +--------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; - lab_id --------- + lab_id +--------------------------------------------------------------------- (0 rows) ALTER TABLE researchers ADD COLUMN motto text; @@ -438,13 +438,13 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; - lab_id --------- + lab_id +--------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; - lab_id --------- + lab_id +--------------------------------------------------------------------- (0 rows) ALTER TABLE researchers ADD COLUMN motto text; @@ -457,8 +457,8 @@ ALTER TABLE labs ADD COLUMN score float; ROLLBACK; -- should have rolled everything back SELECT * FROM labs WHERE id = 12; - id | name -----+---------- + id | name +--------------------------------------------------------------------- 12 | fsociety (1 row) @@ -468,15 +468,15 @@ CREATE TABLE objects ( name text NOT NULL ); SELECT master_create_distributed_table('objects', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('objects', 1, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- test primary key violations @@ -484,13 +484,13 @@ BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_pkey_1200003" -DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57637 +DETAIL: Key (id)=(X) already exists. +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- and placements should still be healthy... 
@@ -500,8 +500,8 @@ FROM pg_dist_shard_placement AS sp, WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND s.logicalrelid = 'objects'::regclass; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -531,13 +531,13 @@ ERROR: illegal value COMMIT; -- so the data should noy be persisted SELECT * FROM objects WHERE id = 2; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- and none of placements should be inactive @@ -549,8 +549,8 @@ AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -583,13 +583,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- all placements should remain healthy @@ -600,8 +600,8 @@ WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -620,11 +620,11 @@ INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 +WARNING: failed to commit transaction on localhost:xxxxx -- data should be persisted SELECT * FROM objects WHERE id = 2; - id | name -----+------ + id | name +--------------------------------------------------------------------- 2 | BAD (1 row) @@ -637,8 +637,8 @@ AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -663,21 +663,21 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 -WARNING: could not commit transaction for shard 1200002 on any active node -WARNING: could not commit transaction for shard 1200003 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- all placements should remain healthy @@ -688,8 +688,8 @@ WHERE sp.shardid = 
s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -704,19 +704,19 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 -WARNING: could not commit transaction for shard 1200002 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node \set VERBOSITY default -- data to objects should be persisted, but labs should not... SELECT * FROM objects WHERE id = 1; - id | name -----+------- + id | name +--------------------------------------------------------------------- 1 | apple (1 row) SELECT * FROM labs WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- labs should be healthy, but one object placement shouldn't be @@ -728,8 +728,8 @@ AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count ---------------+------------+------- + logicalrelid | shardstate | count +--------------------------------------------------------------------- labs | 1 | 1 objects | 1 | 1 objects | 3 | 1 @@ -738,9 +738,9 @@ ORDER BY s.logicalrelid, sp.shardstate; -- some append-partitioned tests for good measure CREATE TABLE append_researchers ( LIKE researchers ); SELECT master_create_distributed_table('append_researchers', 'id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 1; @@ -758,8 +758,8 @@ BEGIN; INSERT INTO append_researchers VALUES (0, 0, 'John Backus'); COMMIT; SELECT * FROM append_researchers WHERE id = 0; - id | lab_id | name -----+--------+------------- + id | lab_id | name +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -768,8 +768,8 @@ BEGIN; DELETE FROM append_researchers WHERE id = 0; ROLLBACK; SELECT * FROM append_researchers WHERE id = 0; - id | lab_id | name -----+--------+------------- + id | lab_id | name +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -781,8 +781,8 @@ ERROR: cannot run INSERT command which targets multiple shards HINT: Make sure the value for partition column "id" falls into a single shard. 
ROLLBACK; SELECT * FROM append_researchers; - id | lab_id | name -----+--------+------------- + id | lab_id | name +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -790,16 +790,16 @@ SELECT * FROM append_researchers; -- let's add some tests for them CREATE TABLE reference_modifying_xacts (key int, value int); SELECT create_reference_table('reference_modifying_xacts'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- very basic test, ensure that INSERTs work INSERT INTO reference_modifying_xacts VALUES (1, 1); SELECT * FROM reference_modifying_xacts; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -807,8 +807,8 @@ SELECT * FROM reference_modifying_xacts; BEGIN; INSERT INTO reference_modifying_xacts VALUES (2, 2); SELECT * FROM reference_modifying_xacts; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -816,8 +816,8 @@ SELECT * FROM reference_modifying_xacts; COMMIT; -- we should be able to see the insert outside of the transaction as well SELECT * FROM reference_modifying_xacts; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -826,8 +826,8 @@ SELECT * FROM reference_modifying_xacts; BEGIN; INSERT INTO reference_modifying_xacts VALUES (3, 3); SELECT * FROM reference_modifying_xacts; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -836,8 +836,8 @@ SELECT * FROM reference_modifying_xacts; ROLLBACK; -- see that we've not inserted SELECT * FROM reference_modifying_xacts; - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -892,8 +892,8 @@ WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_modifying_xacts'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count ----------------------------+------------+------- + logicalrelid | shardstate | count +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 (1 row) @@ -911,9 +911,9 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE hash_modifying_xacts (key int, value int); SELECT create_distributed_table('hash_modifying_xacts', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- let's try to expand the xact participants @@ -955,8 +955,8 @@ ERROR: illegal value COMMIT; -- ensure that the value didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 55; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- now lets fail on of the workers for the hash distributed table table @@ -978,8 +978,8 @@ COMMIT; ERROR: illegal value -- ensure that the values didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 12; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- all placements should be 
healthy @@ -987,12 +987,12 @@ SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR +AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count ----------------------------+------------+------- + logicalrelid | shardstate | count +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) @@ -1015,18 +1015,18 @@ INSERT INTO reference_modifying_xacts VALUES (999, 3); ERROR: illegal value COMMIT; SELECT * FROM hash_modifying_xacts WHERE key = 80; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 66; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 999; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) -- all placements should be healthy @@ -1034,12 +1034,12 @@ SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR +AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count ----------------------------+------------+------- + logicalrelid | shardstate | count +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) @@ -1047,22 +1047,22 @@ ORDER BY s.logicalrelid, sp.shardstate; -- now show that all modifications to reference -- tables are done in 2PC SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) INSERT INTO reference_modifying_xacts VALUES (70, 70); SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -1070,8 +1070,8 @@ BEGIN; INSERT INTO reference_modifying_xacts VALUES (71, 71); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -1080,15 +1080,15 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE hash_modifying_xacts_second (key int, value int); SELECT create_distributed_table('hash_modifying_xacts_second', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- reset the transactions table SELECT 
recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -1097,36 +1097,36 @@ INSERT INTO hash_modifying_xacts_second VALUES (72, 1); INSERT INTO reference_modifying_xacts VALUES (72, 3); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) DELETE FROM reference_modifying_xacts; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) UPDATE reference_modifying_xacts SET key = 10; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -1146,30 +1146,30 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes SET citus.next_shard_id TO 1200015; CREATE TABLE reference_failure_test (key int, value int); SELECT create_reference_table('reference_failure_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- create a hash distributed table SET citus.shard_count TO 4; CREATE TABLE numbers_hash_failure_test(key int, value int); SELECT create_distributed_table('numbers_hash_failure_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- ensure that the shard is created for this user \c - test_user - :worker_1_port \dt reference_failure_test_1200015 List of relations - Schema | Name | Type | Owner ---------+--------------------------------+-------+----------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | reference_failure_test_1200015 | table | test_user (1 row) --- now connect with the default user, +-- now connect with the default user, -- and rename the existing user \c - :default_user - :worker_1_port ALTER USER test_user RENAME TO test_user_new; @@ -1177,21 +1177,21 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; SELECT * FROM reference_failure_test; - key | value 
------+------- + key | value +--------------------------------------------------------------------- (0 rows) RESET client_min_messages; @@ -1203,22 +1203,22 @@ WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_failure_test'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count -------------------------+------------+------- + logicalrelid | shardstate | count +--------------------------------------------------------------------- reference_failure_test | 1 | 2 (1 row) BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- some placements are invalid before abort SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1232,10 +1232,10 @@ ORDER BY shardid, nodeport; ABORT; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 - count -------- +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx + count +--------------------------------------------------------------------- 0 (1 row) @@ -1244,8 +1244,8 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1200016 | 1 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1258,15 +1258,15 @@ ORDER BY shardid, nodeport; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- check shard states before commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1283,8 +1283,8 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 
1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1297,10 +1297,10 @@ ORDER BY shardid, nodeport; -- verify data is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 - count -------- +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx + count +--------------------------------------------------------------------- 2 (1 row) @@ -1310,15 +1310,15 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); -ERROR: connection error: localhost:57638 --- connect back to the master with the proper user to continue the tests +ERROR: connection error: localhost:xxxxx +-- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET citus.next_shard_id TO 1200020; SET citus.next_placement_id TO 1200033; -- unbreak both nodes by renaming the user back to the original name SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_user'); - nodename | nodeport | success | result ------------+----------+---------+------------ + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | ALTER ROLE localhost | 57638 | t | ALTER ROLE (2 rows) @@ -1326,8 +1326,8 @@ SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_us DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; SELECT * FROM run_command_on_workers('DROP USER test_user'); - nodename | nodeport | success | result ------------+----------+---------+----------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE (2 rows) @@ -1342,9 +1342,9 @@ CREATE TABLE usergroups ( name text ); SELECT create_reference_table('usergroups'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE itemgroups ( @@ -1352,9 +1352,9 @@ CREATE TABLE itemgroups ( name text ); SELECT create_reference_table('itemgroups'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE users ( @@ -1363,9 +1363,9 @@ CREATE TABLE users ( user_group int ); SELECT create_distributed_table('users', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE items ( @@ -1374,9 +1374,9 @@ CREATE TABLE items ( item_group int ); SELECT create_distributed_table('items', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Table to find values that live in different shards on the same node @@ -1388,8 +1388,8 @@ JOIN USING (shardid) ORDER BY id; - id | shard_name | nodename | nodeport -----+---------------+-----------+---------- + id | shard_name | nodename | nodeport +--------------------------------------------------------------------- 1 | users_1200022 | localhost | 
57637 2 | users_1200025 | localhost | 57638 3 | users_1200023 | localhost | 57638 @@ -1410,8 +1410,8 @@ INSERT INTO items VALUES (1, 'item-1'); INSERT INTO items VALUES (6, 'item-6'); END; SELECT user_id FROM items ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 6 (2 rows) @@ -1425,14 +1425,14 @@ ROLLBACK; -- perform parallel DDL after a co-located table has been read over 1 connection BEGIN; SELECT id FROM users WHERE id = 1; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; - id ----- + id +--------------------------------------------------------------------- 6 (1 row) @@ -1442,14 +1442,14 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id FROM users WHERE id = 1; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; - id ----- + id +--------------------------------------------------------------------- 6 (1 row) @@ -1459,14 +1459,14 @@ ROLLBACK; BEGIN; ALTER TABLE items ADD COLUMN last_update timestamptz; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 1; - id ----- + id +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 6; - id ----- + id +--------------------------------------------------------------------- 6 (1 row) @@ -1476,38 +1476,38 @@ BEGIN; \COPY users FROM STDIN WITH CSV -- now read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) -- perform a DDL command on the reference table errors -- because the current implementation of COPY always opens one connection --- per placement SELECTs have to use those connections for correctness +-- per placement SELECTs have to use those connections for correctness ALTER TABLE itemgroups ADD COLUMN last_update timestamptz; -ERROR: cannot perform DDL on placement 1200036, which has been read over multiple connections +ERROR: cannot perform DDL on placement xxxxx, which has been read over multiple connections END; BEGIN; -- establish multiple connections to a node \COPY users FROM STDIN WITH CSV -- read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) -- perform a DDL command on a co-located reference table ALTER TABLE usergroups ADD COLUMN last_update timestamptz; -ERROR: cannot perform DDL on placement 1200034 since a co-located placement has been read over multiple connections +ERROR: cannot perform DDL on placement xxxxx since a co-located placement has been read over multiple connections END; BEGIN; -- make a modification over connection 1 @@ -1522,13 +1522,13 @@ END; BEGIN; DELETE FROM users; SELECT user_id FROM items 
JOIN itemgroups ON (item_group = gid) WHERE user_id = 1; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 6; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) END; @@ -1539,15 +1539,15 @@ BEGIN; \COPY users FROM STDIN WITH CSV -- Uses first connection, which wrote the row with id = 2 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 2; - id | name | user_group | gid | name -----+-------+------------+-----+------- + id | name | user_group | gid | name +--------------------------------------------------------------------- 2 | onder | 2 | 2 | group (1 row) -- Should use second connection, which wrote the row with id = 4 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 4; - id | name | user_group | gid | name -----+-------+------------+-----+------- + id | name | user_group | gid | name +--------------------------------------------------------------------- 4 | murat | 2 | 2 | group (1 row) @@ -1565,8 +1565,8 @@ $BODY$ LANGUAGE plpgsql; SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; - name ------- + name +--------------------------------------------------------------------- (0 rows) -- if function_opens_transaction-block is disabled the insert commits immediately @@ -1574,8 +1574,8 @@ SET citus.function_opens_transaction_block TO off; SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; - name ---------------- + name +--------------------------------------------------------------------- Rollback Labs (1 row) diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 1276fa493..d88340208 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -7,24 +7,24 @@ SET citus.next_shard_id TO 1420000; SET citus.shard_replication_factor TO 1; CREATE TABLE test (id integer, val integer); SELECT create_distributed_table('test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_coloc (id integer, val integer); SELECT create_distributed_table('test_coloc', 'id', colocate_with := 'test'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 1; CREATE TABLE singleshard (id integer, val integer); SELECT create_distributed_table('singleshard', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- turn off propagation to avoid Enterprise processing the following section @@ -123,35 +123,35 @@ ERROR: path must be in the pgsql_job_cache directory SET ROLE full_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) INSERT INTO test VALUES (2); SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; - count -------- + count +--------------------------------------------------------------------- 
1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; - count | min --------+------------- + count | min +--------------------------------------------------------------------- 2 | full_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -166,24 +166,24 @@ ERROR: operation is not allowed HINT: Run the command with a superuser. -- create a task that other users should not be able to inspect SELECT task_tracker_assign_task(1, 1, 'SELECT 1'); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) -- check read permission SET ROLE read_access; -- should be allowed to run commands, as the current user SELECT result FROM run_command_on_workers($$SELECT current_user$$); - result -------------- + result +--------------------------------------------------------------------- read_access read_access (2 rows) SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); - result -------------- + result +--------------------------------------------------------------------- read_access read_access read_access @@ -191,8 +191,8 @@ SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); (4 rows) SELECT result FROM run_command_on_colocated_placements('test', 'test_coloc', $$SELECT current_user$$); - result -------------- + result +--------------------------------------------------------------------- read_access read_access read_access @@ -202,36 +202,36 @@ SELECT result FROM run_command_on_colocated_placements('test', 'test_coloc', $$S EXECUTE prepare_insert(1); ERROR: permission denied for table test EXECUTE prepare_select; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) INSERT INTO test VALUES (2); ERROR: permission denied for table test SELECT count(*) FROM test; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; - count | min --------+------------- + count | min +--------------------------------------------------------------------- 2 | read_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -250,8 +250,8 @@ ERROR: must be owner of schema pg_merge_job_0001 -- should not be allowed to take aggressive locks on table BEGIN; SELECT lock_relation_if_exists('test', 'ACCESS SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) @@ -285,14 +285,14 @@ RESET citus.task_executor_type; -- should be able to use intermediate results as any user BEGIN; SELECT create_intermediate_result('topten', 'SELECT s FROM generate_series(1,10) s'); - create_intermediate_result ----------------------------- + 
create_intermediate_result +--------------------------------------------------------------------- 10 (1 row) SELECT * FROM read_intermediate_result('topten', 'binary'::citus_copy_format) AS res (s int) ORDER BY s; - s ----- + s +--------------------------------------------------------------------- 1 2 3 @@ -333,22 +333,22 @@ SET ROLE full_access; CREATE TABLE my_table (id integer, val integer); RESET ROLE; SELECT create_distributed_table('my_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT result FROM run_command_on_workers($$SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_table_%' LIMIT 1$$); - result -------------- + result +--------------------------------------------------------------------- full_access full_access (2 rows) SELECT task_tracker_cleanup_job(1); - task_tracker_cleanup_job --------------------------- - + task_tracker_cleanup_job +--------------------------------------------------------------------- + (1 row) -- table should be distributable by super user when it has data in there @@ -358,14 +358,14 @@ INSERT INTO my_table_with_data VALUES (1,2); RESET ROLE; SELECT create_distributed_table('my_table_with_data', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM my_table_with_data; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -385,9 +385,9 @@ RESET ROLE; SET ROLE read_access; SELECT create_distributed_table('my_role_table_with_data', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) RESET ROLE; @@ -395,8 +395,8 @@ RESET ROLE; SELECT result FROM run_command_on_workers($cmd$ SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1; $cmd$); - result ------------ + result +--------------------------------------------------------------------- some_role some_role (2 rows) @@ -408,8 +408,6 @@ $cmd$); SET ROLE usage_access; CREATE TABLE full_access_user_schema.t1 (id int); ERROR: permission denied for schema full_access_user_schema -LINE 1: CREATE TABLE full_access_user_schema.t1 (id int); - ^ RESET ROLE; -- now we create the table for the user CREATE TABLE full_access_user_schema.t1 (id int); @@ -421,7 +419,7 @@ INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3); -- not allowed to create a table SELECT create_distributed_table('full_access_user_schema.t1', 'id'); ERROR: permission denied for schema full_access_user_schema -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx RESET ROLE; SET ROLE usage_access; CREATE TYPE usage_access_type AS ENUM ('a', 'b'); @@ -432,48 +430,48 @@ SELECT create_distributed_function('usage_access_func(usage_access_type,int[])') ERROR: must be owner of function usage_access_func SET ROLE usage_access; SELECT create_distributed_function('usage_access_func(usage_access_type,int[])'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'; - typowner --------------- + typowner +--------------------------------------------------------------------- usage_access (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'; - proowner --------------- + proowner +--------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'$$); - run_command_on_workers ----------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'$$); - run_command_on_workers ----------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) CREATE TABLE colocation_table(id text); SELECT create_distributed_table('colocation_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- now, make sure that the user can use the function @@ -482,14 +480,14 @@ BEGIN; CREATE FUNCTION usage_access_func_second(key int, variadic v int[]) RETURNS text LANGUAGE plpgsql AS 'begin return current_user; end;'; SELECT create_distributed_function('usage_access_func_second(int,int[])', '$1', colocate_with := 
'colocation_table'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT usage_access_func_second(1, 2,3,4,5) FROM full_access_user_schema.t1 LIMIT 1; - usage_access_func_second --------------------------- + usage_access_func_second +--------------------------------------------------------------------- usage_access (1 row) @@ -500,27 +498,27 @@ CREATE FUNCTION usage_access_func_third(key int, variadic v int[]) RETURNS text \c - - - :master_port -- show that the current user is a super user SELECT usesuper FROM pg_user where usename IN (SELECT current_user); - usesuper ----------- + usesuper +--------------------------------------------------------------------- t (1 row) -- superuser creates the distributed function that is owned by a regular user SELECT create_distributed_function('usage_access_func_third(int,int[])', '$1', colocate_with := 'colocation_table'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'; - proowner --------------- + proowner +--------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'$$); - run_command_on_workers ----------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) @@ -528,24 +526,24 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron -- we don't want other tests to have metadata synced -- that might change the test outputs, so we're just trying to be careful SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) RESET ROLE; -- now we distribute the table as super user SELECT create_distributed_table('full_access_user_schema.t1', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify the owner of the shards for the distributed tables @@ -556,8 +554,8 @@ SELECT result FROM run_command_on_workers($cmd$ AND tablename LIKE 't1_%' LIMIT 1; $cmd$); - result --------------- + result +--------------------------------------------------------------------- usage_access usage_access (2 rows) @@ -566,9 +564,9 @@ $cmd$); SET ROLE full_access; CREATE TABLE full_access_user_schema.t2(id int); SELECT create_distributed_table('full_access_user_schema.t2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) RESET ROLE; @@ -579,15 +577,15 @@ BEGIN; CREATE TABLE full_access_user_schema.r1(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT upgrade_to_reference_table('full_access_user_schema.r1'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -599,9 +597,9 @@ BEGIN; CREATE TABLE full_access_user_schema.r2(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -613,9 +611,9 @@ ERROR: must be owner of table r2 RESET ROLE; -- the super user should be able SELECT upgrade_to_reference_table('full_access_user_schema.r2'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- verify the owner of the shards for the reference table @@ -626,17 +624,17 @@ SELECT result FROM run_command_on_workers($cmd$ AND tablename LIKE 'r2_%' LIMIT 1; $cmd$); - result -------------- + result +--------------------------------------------------------------------- full_access full_access (2 rows) -- super user should be the only one being able to call worker_cleanup_job_schema_cache SELECT worker_cleanup_job_schema_cache(); - worker_cleanup_job_schema_cache ---------------------------------- - + worker_cleanup_job_schema_cache +--------------------------------------------------------------------- + (1 row) SET ROLE full_access; @@ -656,9 +654,9 @@ RESET ROLE; \c - - - :worker_1_port SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table ------------------------------ - + worker_hash_partition_table +--------------------------------------------------------------------- + (1 row) RESET ROLE; @@ -666,21 +664,21 @@ RESET ROLE; \c - - - :worker_2_port -- super user should not be able to copy files created by a user SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); -WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.10": No such file or directory -CONTEXT: while executing command on localhost:57637 -ERROR: could not receive 
file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637 +WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory +CONTEXT: while executing command on localhost:xxxxx +ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx -- different user should not be able to fetch partition file SET ROLE usage_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); -WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.18110": No such file or directory -CONTEXT: while executing command on localhost:57637 -ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637 +WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory +CONTEXT: while executing command on localhost:xxxxx +ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx -- only the user whom created the files should be able to fetch SET ROLE full_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); - worker_fetch_partition_file ------------------------------ - + worker_fetch_partition_file +--------------------------------------------------------------------- + (1 row) RESET ROLE; @@ -697,9 +695,9 @@ SET ROLE full_access; -- user could call worker_merge_files_into_table and store the results in public, which is -- not what we want SELECT task_tracker_assign_task(42, 1, 'SELECT 1'); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) RESET ROLE; @@ -711,29 +709,29 @@ RESET ROLE; -- test that the super user is unable to read the contents of the intermediate file, -- although it does create the table SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); -WARNING: Task file "task_000001.18048" does not have expected suffix ".10" - worker_merge_files_into_table -------------------------------- - +WARNING: Task file "task_000001.xxxx" does not have expected suffix ".10" + worker_merge_files_into_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) DROP TABLE pg_merge_job_0042.task_000001; -- drop table so we can reuse the same files for more tests SET ROLE full_access; SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); - worker_merge_files_into_table -------------------------------- - + worker_merge_files_into_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -753,21 +751,21 @@ SELECT worker_merge_files_and_run_query(42, 1, 'CREATE TABLE task_000001_merge(merge_column_0 int)', 'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge' ); -WARNING: Task file "task_000001.18048" does not have expected suffix ".10" - worker_merge_files_and_run_query ----------------------------------- - +WARNING: Task file "task_000001.xxxx" does not have expected suffix ".10" + worker_merge_files_and_run_query 
+--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -778,9 +776,9 @@ SELECT worker_merge_files_and_run_query(42, 1, 'CREATE TABLE task_000001_merge(merge_column_0 int)', 'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge' ); - worker_merge_files_and_run_query ----------------------------------- - + worker_merge_files_and_run_query +--------------------------------------------------------------------- + (1 row) -- test that owner of task cannot execute arbitrary sql @@ -797,14 +795,14 @@ SELECT worker_merge_files_and_run_query(42, 1, ERROR: permission denied to drop role CONTEXT: SQL statement "DROP USER usage_access" SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -812,8 +810,8 @@ DROP TABLE pg_merge_job_0042.task_000001, pg_merge_job_0042.task_000001_merge; - RESET ROLE; \c - - - :master_port SELECT run_command_on_workers($$SELECT task_tracker_cleanup_job(42);$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 3b41a445b..68bec5415 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -8,8 +8,8 @@ SET citus.replication_model TO streaming; SET client_min_messages TO WARNING; CREATE USER reprefuser WITH LOGIN; SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -17,30 +17,30 @@ SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEDB; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- test that coordinator pg_dist_node entry is synced to the workers SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata ------------------+----------------- + verify_metadata | verify_metadata +--------------------------------------------------------------------- t | t (1 row) CREATE TABLE ref(a int); SELECT create_reference_table('ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- alter role from mx worker isn't propagated @@ -48,15 +48,15 @@ SELECT create_reference_table('ref'); SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEROLE; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole --------------+--------------- + rolcreatedb | rolcreaterole +--------------------------------------------------------------------- t | t (1 row) \c - - - :worker_2_port select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole --------------+--------------- + rolcreatedb | rolcreaterole +--------------------------------------------------------------------- t | f (1 row) @@ -64,8 +64,8 @@ select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; SET search_path TO mx_add_coordinator,public; SET client_min_messages TO WARNING; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole --------------+--------------- + rolcreatedb | rolcreaterole +--------------------------------------------------------------------- t | f (1 row) @@ -78,8 +78,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -88,8 +88,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -99,8 +99,8 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -108,8 +108,8 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -117,8 +117,8 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -143,8 +143,8 @@ ERROR: relation 
local_table is not distributed \c - - - :master_port SET search_path TO mx_add_coordinator,public; SELECT * FROM ref ORDER BY a; - a ---- + a +--------------------------------------------------------------------- 2 3 (2 rows) @@ -152,34 +152,34 @@ SELECT * FROM ref ORDER BY a; -- Clear pg_dist_transaction before removing the node. This is to keep the output -- of multi_mx_transaction_recovery consistent. SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM run_command_on_workers('SELECT recover_prepared_transactions()'); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT master_remove_node('localhost', :master_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- test that coordinator pg_dist_node entry was removed from the workers SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata ------------------+----------------- + verify_metadata | verify_metadata +--------------------------------------------------------------------- t | t (1 row) diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index b7c7d6725..94f2260a7 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -7,9 +7,9 @@ set citus.replication_model to 'statement'; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5); @@ -20,42 +20,42 @@ set citus.replication_model to 'streaming'; -- create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select 
create_reference_table('mx_call_dist_table_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_ref values (2,7),(1,8),(2,8),(1,8),(2,8); create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select create_distributed_table('mx_call_dist_table_enum', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_enum values (1,'S'),(2,'A'),(3,'D'),(4,'F'); @@ -85,47 +85,47 @@ BEGIN END;$$; -- Test that undistributed procedures have no issue executing call multi_mx_call.mx_call_proc(2, 0); - y ----- + y +--------------------------------------------------------------------- 29 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); - x | y ----+--- + x | y +--------------------------------------------------------------------- F | S (1 row) -- Same for unqualified names call mx_call_proc(2, 0); - y ----- + y +--------------------------------------------------------------------- 29 (1 row) call mx_call_proc_custom_types('S', 'A'); - x | y ----+--- + x | y +--------------------------------------------------------------------- F | S (1 row) -- Mark both procedures as distributed ... select create_distributed_function('mx_call_proc(int,int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('mx_call_proc_bigint(bigint,bigint)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('mx_call_proc_custom_types(mx_call_enum,mx_call_enum)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- We still don't route them to the workers, because they aren't @@ -133,83 +133,83 @@ select create_distributed_function('mx_call_proc_custom_types(mx_call_enum,mx_ca SET client_min_messages TO DEBUG1; call multi_mx_call.mx_call_proc(2, 0); DEBUG: stored procedure does not have co-located tables -DEBUG: generating subplan 11_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT 
intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) call mx_call_proc_bigint(4, 2); DEBUG: stored procedure does not have co-located tables - y ---- + y +--------------------------------------------------------------------- 8 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables - x | y ----+--- + x | y +--------------------------------------------------------------------- F | S (1 row) -- Mark them as colocated with a table. Now we should route them to workers. select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select colocate_proc_with_table('mx_call_proc_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select colocate_proc_with_table('mx_call_proc_custom_types', 'mx_call_dist_table_enum'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: pushing down the procedure - y ----- + y +--------------------------------------------------------------------- 28 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure - x | y ----+--- + x | y +--------------------------------------------------------------------- S | S (1 row) call mx_call_proc(2, 0); DEBUG: pushing down the procedure - y ----- + y +--------------------------------------------------------------------- 28 (1 row) call mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure - x | y ----+--- + x | y +--------------------------------------------------------------------- S | S (1 row) -- Test implicit cast of int to bigint call mx_call_proc_bigint(4, 2); DEBUG: pushing down the procedure - y ---- + y +--------------------------------------------------------------------- 8 (1 row) @@ -217,14 +217,14 @@ DEBUG: pushing down the procedure begin; call multi_mx_call.mx_call_proc(2, 0); DEBUG: cannot push down CALL in multi-statement transaction -DEBUG: generating subplan 13_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 13 query after replacing subqueries 
and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) @@ -236,88 +236,88 @@ drop table mx_call_dist_table_enum; SET client_min_messages TO DEBUG1; call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables - x | y ----+--- + x | y +--------------------------------------------------------------------- F | S (1 row) -- Make sure we do bounds checking on distributed argument index -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, -1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: cannot push down invalid distribution_argument_index -DEBUG: generating subplan 15_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 2); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: cannot push down invalid distribution_argument_index -DEBUG: generating subplan 18_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 
t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_ref'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: cannot push down CALL for reference tables -DEBUG: generating subplan 20_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_replica'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table 
+--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: cannot push down CALL for replicated distributed tables -DEBUG: generating subplan 22_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) @@ -325,9 +325,9 @@ SET client_min_messages TO NOTICE; drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) -- Test that we handle transactional constructs correctly inside a procedure @@ -347,16 +347,16 @@ CALL multi_mx_call.mx_call_proc_tx(10); select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) CALL multi_mx_call.mx_call_proc_tx(20); DEBUG: pushing down the procedure SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 3 | 1 3 | 5 4 | 5 @@ -377,9 +377,9 @@ END;$$; select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) \set VERBOSITY terse @@ -390,41 +390,41 @@ ERROR: error \set VERBOSITY default -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: there is no worker node with metadata -DEBUG: generating subplan 28_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make @@ -441,44 +441,44 @@ CREATE FUNCTION mx_call_add(int, int) RETURNS int SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- non-const distribution parameters cannot be pushed down call multi_mx_call.mx_call_proc(2, mx_call_add(3, 4)); DEBUG: distribution argument value must be a constant -DEBUG: generating subplan 1_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) -- non-const parameter can be pushed down call multi_mx_call.mx_call_proc(multi_mx_call.mx_call_add(3, 4), 2); DEBUG: pushing down the procedure - y ----- + y +--------------------------------------------------------------------- 33 (1 row) -- volatile parameter cannot be pushed down call multi_mx_call.mx_call_proc(floor(random())::int, 2); DEBUG: arguments in a distributed stored procedure must be constant expressions -DEBUG: generating subplan 3_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_call.mx_call_dist_table_1 t1 JOIN multi_mx_call.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum 
bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 27 (1 row) diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 9ad854dd0..e64a49439 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -3,15 +3,15 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- create schema to test schema support @@ -145,9 +145,9 @@ CREATE TABLE nation_hash( ); SET citus.shard_count TO 16; SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET search_path TO citus_mx_test_schema; @@ -159,9 +159,9 @@ CREATE TABLE citus_mx_test_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( @@ -171,9 +171,9 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( n_comment varchar(152)); SET citus.shard_count TO 4; SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( @@ -182,9 +182,9 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET search_path TO citus_mx_test_schema_join_2; @@ -194,9 +194,9 @@ CREATE TABLE nation_hash ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET search_path TO citus_mx_test_schema; @@ -207,9 +207,9 @@ CREATE TABLE nation_hash_collation_search_path( n_comment varchar(152) ); SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY 
nation_hash_collation_search_path FROM STDIN with delimiter '|'; @@ -221,9 +221,9 @@ CREATE TABLE citus_mx_test_schema.nation_hash_composite_types( test_col citus_mx_test_schema.new_composite_type ); SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- insert some data to verify composite type queries @@ -252,9 +252,9 @@ CREATE TABLE lineitem_mx ( PRIMARY KEY(l_orderkey, l_linenumber) ); SET citus.shard_count TO 16; SELECT create_distributed_table('lineitem_mx', 'l_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate); @@ -270,9 +270,9 @@ CREATE TABLE orders_mx ( o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_mx', 'o_orderkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE customer_mx ( @@ -285,9 +285,9 @@ CREATE TABLE customer_mx ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_reference_table('customer_mx'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE nation_mx ( @@ -296,9 +296,9 @@ CREATE TABLE nation_mx ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation_mx'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE part_mx ( @@ -312,9 +312,9 @@ CREATE TABLE part_mx ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_reference_table('part_mx'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE supplier_mx @@ -328,9 +328,9 @@ CREATE TABLE supplier_mx s_comment varchar(101) not null ); SELECT create_reference_table('supplier_mx'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- Create test table for ddl @@ -340,9 +340,9 @@ CREATE TABLE mx_ddl_table ( ); SET citus.shard_count TO 4; SELECT create_distributed_table('mx_ddl_table', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Load some test data @@ -358,9 +358,9 @@ CREATE TABLE limit_orders_mx ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders_mx', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test table for modifications @@ -369,17 +369,17 @@ CREATE TABLE multiple_hash_mx ( data text NOT NULL ); SELECT create_distributed_table('multiple_hash_mx', 'category'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 4; CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events_mx', 'app_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE researchers_mx ( @@ -389,9 +389,9 @@ CREATE TABLE researchers_mx ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers_mx', 'lab_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE labs_mx ( @@ -400,9 +400,9 @@ CREATE TABLE labs_mx ( ); SET citus.shard_count TO 1; SELECT create_distributed_table('labs_mx', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- now, for some special failures... @@ -411,9 +411,9 @@ CREATE TABLE objects_mx ( name text NOT NULL ); SELECT create_distributed_table('objects_mx', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE articles_hash_mx ( @@ -426,24 +426,24 @@ CREATE TABLE articles_hash_mx ( CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx); SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash_mx', 'author_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 1; SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 4; CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int); SELECT create_distributed_table('company_employees_mx', 'company_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) WITH shard_counts AS ( @@ -452,8 +452,8 @@ WITH shard_counts AS ( SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid | shard_count | partmethod | repmodel ---------------------------------------------------------+--------------+-------------+------------+---------- + logicalrelid | colocationid | shard_count | partmethod | repmodel +--------------------------------------------------------------------- citus_mx_test_schema_join_1.nation_hash | 1390002 | 4 | h | s citus_mx_test_schema_join_1.nation_hash_2 | 1390002 | 4 | h | s citus_mx_test_schema_join_2.nation_hash | 1390002 | 4 | h | s diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index da543acb6..6deaa6dd0 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -1,7 +1,7 @@ -- Tests related to distributed DDL commands on mx cluster SELECT * FROM mx_ddl_table ORDER BY key; - key | value ------+------- + key | 
value +--------------------------------------------------------------------- 1 | 10 2 | 11 3 | 21 @@ -18,23 +18,23 @@ CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; -- SET DEFAULT -ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; +ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL; -- SET NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers ----------+---------+-------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition ----------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) @@ -43,33 +43,33 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE -- make sure we don't break the following tests by hiding the shard names SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers ----------+---------+-------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition ----------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; - Column | Type | Modifiers ----------+---------+-------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220088'; - relname | Column | Type | Definition ------------------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- ddl_test_index_1220088 | value | integer | value ddl_test_concurrent_index_1220088 | value | integer | value (2 rows) @@ -78,33 +78,33 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE -- make sure we don't break the following tests by hiding the shard names SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | 
Modifiers ----------+---------+-------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition ----------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; - Column | Type | Modifiers ----------+---------+-------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220089'; - relname | Column | Type | Definition ------------------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- ddl_test_index_1220089 | value | integer | value ddl_test_concurrent_index_1220089 | value | integer | value (2 rows) @@ -118,8 +118,8 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision; INSERT INTO mx_ddl_table VALUES (78, 83, 2.1); \c - - - :worker_1_port SELECT * FROM mx_ddl_table ORDER BY key; - key | value | version ------+-------+--------- + key | value | version +--------------------------------------------------------------------- 1 | 10 | 0 2 | 11 | 0 3 | 21 | 0 @@ -146,70 +146,70 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL; ALTER TABLE mx_ddl_table DROP COLUMN version; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers ---------+---------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers ---------+---------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; - Column | Type | Modifiers ---------+---------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + 
value | integer | (2 rows) \di ddl_test*_index_1220088 List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers ---------+---------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; - Column | Type | Modifiers ---------+---------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index_1220089 List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) -- Show that DDL commands are done within a two-phase commit transaction @@ -222,9 +222,9 @@ SET citus.shard_count TO 4; SET citus.replication_model TO streaming; CREATE TABLE mx_sequence(key INT, value BIGSERIAL); SELECT create_distributed_table('mx_sequence', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -235,8 +235,8 @@ SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset -- don't look at the actual values because they rely on the groupids of the nodes -- which can change depending on the tests which have run before this one SELECT :worker_1_lastval = :worker_2_lastval; - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index bcf070035..fe2f78a29 100644 --- a/src/test/regress/expected/multi_mx_explain.out +++ b/src/test/regress/expected/multi_mx_explain.out @@ -70,7 +70,7 @@ Sort Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx @@ -104,7 +104,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 16", "Tasks": [ { - "Node": "host=localhost port=57637 dbname=regression", + "Node": "host=localhost port=xxxxx dbname=regression", "Remote Plan": [ [ { @@ -180,7 +180,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 16 - host=localhost port=57637 dbname=regression + host=localhost port=xxxxx dbname=regression @@ -225,40 +225,40 @@ t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: +- Plan: Node Type: "Sort" Parallel Aware: false - Sort Key: + Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false - Group Key: + Group Key: - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 16 Tasks Shown: "One of 16" - Tasks: - - Node: "host=localhost port=57637 dbname=regression" - Remote Plan: - - Plan: + Tasks: + - Node: "host=localhost port=xxxxx dbname=regression" + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false - Group Key: + Group Key: - "l_quantity" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -277,7 +277,7 @@ Sort Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx @@ -292,7 +292,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx @@ -309,7 +309,7 @@ Limit Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: lineitem_mx.l_quantity @@ -326,7 +326,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Insert on lineitem_mx_1220052 -> Result -- Test update @@ -338,7 +338,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) 
@@ -351,7 +351,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) @@ -368,7 +368,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ @@ -385,7 +385,7 @@ Custom Scan (Citus Adaptive) Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test all tasks output SET citus.explain_all_tasks TO on; @@ -396,82 +396,82 @@ Aggregate Task Count: 16 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220053 on lineitem_mx_1220053 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: 
host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220065 lineitem_mx Filter: (l_orderkey > 9030) -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx Index Cond: (l_orderkey > 9030) @@ -491,7 +491,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) @@ -507,7 +507,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey) @@ -547,7 +547,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 16", "Tasks": [ { - "Node": "host=localhost port=57637 dbname=regression", + "Node": "host=localhost port=xxxxx dbname=regression", "Remote Plan": [ [ { @@ -683,7 +683,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 16 - host=localhost port=57637 dbname=regression + host=localhost port=xxxxx dbname=regression @@ -799,43 +799,43 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -- Plan: +- Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 16 Tasks Shown: "One of 16" - Tasks: - - Node: "host=localhost port=57637 dbname=regression" - Remote Plan: - - Plan: + Tasks: + - Node: "host=localhost port=xxxxx dbname=regression" + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)" - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -844,7 +844,7 @@ 
EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -853,14 +853,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -869,7 +869,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 2f4f45b69..32dd3892d 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -6,9 +6,9 @@ SET citus.replication_model TO 'statement'; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5); @@ -19,42 +19,42 @@ SET citus.replication_model TO 'streaming'; -- create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select create_reference_table('mx_call_dist_table_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_ref values (2,7),(1,8),(2,8),(1,8),(2,8); create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select create_distributed_table('mx_call_dist_table_enum', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into mx_call_dist_table_enum values 
(1,'S'),(2,'A'),(3,'D'),(4,'F'); @@ -87,20 +87,20 @@ BEGIN END;$$; -- Test that undistributed functions have no issue executing select multi_mx_function_call_delegation.mx_call_func(2, 0); - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); - mx_call_func_custom_types ---------------------------- + mx_call_func_custom_types +--------------------------------------------------------------------- (F,S) (1 row) select squares(4); - squares ---------- + squares +--------------------------------------------------------------------- (1,1) (2,4) (3,9) @@ -109,34 +109,34 @@ select squares(4); -- Same for unqualified name select mx_call_func(2, 0); - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) -- Mark both functions as distributed ... select create_distributed_function('mx_call_func(int,int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('mx_call_func_bigint(bigint,bigint)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('mx_call_func_custom_types(mx_call_enum,mx_call_enum)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select create_distributed_function('squares(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- We still don't route them to the workers, because they aren't @@ -144,74 +144,74 @@ select create_distributed_function('squares(int)'); SET client_min_messages TO DEBUG1; select mx_call_func(2, 0); DEBUG: function does not have co-located tables -DEBUG: generating subplan 11_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from 
multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_bigint(4, 2); DEBUG: function does not have co-located tables - mx_call_func_bigint ---------------------- + mx_call_func_bigint +--------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables - mx_call_func_custom_types ---------------------------- + mx_call_func_custom_types +--------------------------------------------------------------------- (F,S) (1 row) -- Mark them as colocated with a table. Now we should route them to workers. select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) select mx_call_func_bigint(4, 2); DEBUG: pushing down the function call - mx_call_func_bigint ---------------------- + mx_call_func_bigint +--------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call - mx_call_func_custom_types ---------------------------- + mx_call_func_custom_types +--------------------------------------------------------------------- (S,S) (1 row) @@ -220,15 +220,15 @@ DEBUG: pushing down the function call ERROR: input of anonymous composite types is not implemented select multi_mx_function_call_delegation.mx_call_func(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call - mx_call_func_custom_types ---------------------------- + mx_call_func_custom_types +--------------------------------------------------------------------- (S,S) (1 row) @@ -236,14 +236,14 @@ DEBUG: pushing down the function call begin; select mx_call_func(2, 0); DEBUG: not pushing down function calls in a multi-statement transaction -DEBUG: generating subplan 13_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM 
(multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) @@ -255,88 +255,88 @@ drop table mx_call_dist_table_enum; SET client_min_messages TO DEBUG1; select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables - mx_call_func_custom_types ---------------------------- + mx_call_func_custom_types +--------------------------------------------------------------------- (F,S) (1 row) -- Make sure we do bounds checking on distributed argument index -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, -1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: function call does not have a distribution argument -DEBUG: generating subplan 15_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 
OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 2); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: function call does not have a distribution argument -DEBUG: generating subplan 18_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_ref'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: cannnot push down function call for reference tables -DEBUG: generating subplan 20_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) 
CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_replica'::regclass, 1); - colocate_proc_with_table --------------------------- - + colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: cannot push down function call for replicated distributed tables -DEBUG: generating subplan 22_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) @@ -344,9 +344,9 @@ SET client_min_messages TO NOTICE; drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table --------------------------- - + 
colocate_proc_with_table +--------------------------------------------------------------------- + (1 row) -- Test table returning functions. @@ -365,8 +365,8 @@ BEGIN END;$$; -- before distribution ... select mx_call_func_tbl(10); - mx_call_func_tbl ------------------- + mx_call_func_tbl +--------------------------------------------------------------------- (10,-1) (11,4) (2 rows) @@ -375,15 +375,15 @@ select mx_call_func_tbl(10); select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select mx_call_func_tbl(20); DEBUG: pushing down the function call - mx_call_func_tbl ------------------- + mx_call_func_tbl +--------------------------------------------------------------------- (20,-1) (21,4) (2 rows) @@ -398,25 +398,25 @@ END;$$; select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) select mx_call_func_raise(2); DEBUG: pushing down the function call DEBUG: warning -DETAIL: WARNING from localhost:57638 +DETAIL: WARNING from localhost:xxxxx ERROR: error -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx PL/pgSQL function multi_mx_function_call_delegation.mx_call_func_raise(integer) line 4 at RAISE -- Don't push-down when doing INSERT INTO ... 
SELECT func(); SET client_min_messages TO ERROR; CREATE TABLE test (x int primary key); SELECT create_distributed_table('test','x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION delegated_function(a int) @@ -431,9 +431,9 @@ BEGIN END; $function$; SELECT create_distributed_function('delegated_function(int)', 'a'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG1; @@ -446,21 +446,21 @@ SELECT * FROM test WHERE not exists( SELECT delegated_function(4) ); DEBUG: not pushing down function calls in CTEs or Subqueries -DEBUG: generating subplan 31_1 for subquery SELECT multi_mx_function_call_delegation.delegated_function(4) AS delegated_function -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (NOT (EXISTS (SELECT intermediate_result.delegated_function FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)))) - x ---- +DEBUG: generating subplan XXX_1 for subquery SELECT multi_mx_function_call_delegation.delegated_function(4) AS delegated_function +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (NOT (EXISTS (SELECT intermediate_result.delegated_function FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)))) + x +--------------------------------------------------------------------- (0 rows) WITH r AS ( SELECT delegated_function(7) ) SELECT * FROM test WHERE (SELECT count(*)=0 FROM r); -DEBUG: generating subplan 34_1 for CTE r: SELECT multi_mx_function_call_delegation.delegated_function(7) AS delegated_function +DEBUG: generating subplan XXX_1 for CTE r: SELECT multi_mx_function_call_delegation.delegated_function(7) AS delegated_function DEBUG: not pushing down function calls in CTEs or Subqueries -DEBUG: generating subplan 34_2 for subquery SELECT (count(*) OPERATOR(pg_catalog.=) 0) FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r -DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (SELECT intermediate_result."?column?" FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" boolean)) - x ---- +DEBUG: generating subplan XXX_2 for subquery SELECT (count(*) OPERATOR(pg_catalog.=) 0) FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (SELECT intermediate_result."?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
boolean)) + x +--------------------------------------------------------------------- (0 rows) WITH r AS ( @@ -468,12 +468,12 @@ WITH r AS ( ), t AS ( SELECT count(*) c FROM r ) SELECT * FROM test, t WHERE t.c=0; -DEBUG: generating subplan 38_1 for CTE r: SELECT multi_mx_function_call_delegation.delegated_function(10) AS delegated_function +DEBUG: generating subplan XXX_1 for CTE r: SELECT multi_mx_function_call_delegation.delegated_function(10) AS delegated_function DEBUG: not pushing down function calls in CTEs or Subqueries -DEBUG: generating subplan 38_2 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT test.x, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.c FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) - x | c ----+--- +DEBUG: generating subplan XXX_2 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT test.x, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.c FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) + x | c +--------------------------------------------------------------------- (0 rows) WITH r AS ( @@ -483,52 +483,52 @@ WITH r AS ( ), t AS ( SELECT count(*) c FROM s ) SELECT * FROM test, r, t WHERE t.c=0; -DEBUG: generating subplan 42_1 for CTE r: SELECT count(*) AS count FROM multi_mx_function_call_delegation.test -DEBUG: generating subplan 42_2 for CTE s: SELECT multi_mx_function_call_delegation.delegated_function(13) AS delegated_function +DEBUG: generating subplan XXX_1 for CTE r: SELECT count(*) AS count FROM multi_mx_function_call_delegation.test +DEBUG: generating subplan XXX_2 for CTE s: SELECT multi_mx_function_call_delegation.delegated_function(13) AS delegated_function DEBUG: not pushing down function calls in CTEs or Subqueries -DEBUG: generating subplan 42_3 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) s -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, r.count, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.count FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) r, (SELECT intermediate_result.c FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) - x | count | c ----+-------+--- +DEBUG: generating subplan XXX_3 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) s +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT test.x, r.count, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.count FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) r, (SELECT intermediate_result.c FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) + x | count | c +--------------------------------------------------------------------- (0 rows) -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) select mx_call_func(2, 0); DEBUG: the worker node does not have metadata -DEBUG: generating subplan 47_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('47_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make @@ -546,55 +546,55 @@ CREATE FUNCTION mx_call_add(int, int) RETURNS int SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- subquery parameters cannot be pushed down select mx_call_func((select x + 1 from mx_call_add(3, 4) x), 2); DEBUG: arguments in a distributed function must not contain subqueries -DEBUG: generating subplan 1_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (9 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (9 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 35 (1 row) -- volatile parameter cannot be pushed down select mx_call_func(floor(random())::int, 2); DEBUG: arguments in a distributed function must be constant expressions -DEBUG: generating subplan 3_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 27 (1 row) -- test forms we don't distribute select * from mx_call_func(2, 0); -DEBUG: generating subplan 5_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - y ----- + y +--------------------------------------------------------------------- 29 (1 row) select mx_call_func(2, 0) from mx_call_dist_table_1; - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 28 28 @@ -607,31 +607,31 @@ select mx_call_func(2, 0) from mx_call_dist_table_1; (9 rows) select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; -DEBUG: generating subplan 8_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('8_1'::text, 
'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- (0 rows) select mx_call_func(2, 0), mx_call_func(0, 2); -DEBUG: generating subplan 10_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: generating subplan 13_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func | mx_call_func ---------------+-------------- + mx_call_func | mx_call_func +--------------------------------------------------------------------- 29 | 27 (1 row) @@ -640,8 +640,8 @@ DEBUG: not pushing down function calls in a multi-statement transaction CONTEXT: SQL statement "SELECT mx_call_func_tbl(40)" PL/pgSQL function inline_code_block line 1 at PERFORM SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 40 | -1 41 | 4 (2 rows) @@ -650,43 +650,43 @@ SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; PREPARE call_plan (int, int) AS SELECT mx_call_func($1, $2); EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func --------------- + mx_call_func +--------------------------------------------------------------------- 28 (1 row) diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 409d97fff..cbc57ef7b 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -14,10 +14,10 @@ FROM WHERE proname LIKE '%table_is_visible%' ORDER BY 1; - proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl -------------------------+-------------+-----------+-------------+-------------+----------+-----------------+------------+-------------+-------- - citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | - pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | + proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl +--------------------------------------------------------------------- + citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | + pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | (2 rows) CREATE SCHEMA mx_hide_shard_names; @@ -26,34 +26,34 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - 
start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- first show that the views does not show -- any shards on the coordinator as expected SELECT * FROM citus_shards_on_worker; - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker; - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) -- now show that we see the shards, but not the @@ -61,15 +61,15 @@ SELECT * FROM citus_shard_indexes_on_worker; \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ----------------------+--------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) -- also show that nested calls to pg_table_is_visible works fine @@ -84,8 +84,8 @@ SELECT NOT pg_table_is_visible("t1"."Name"::regclass) LIMIT 1)); - pg_table_is_visible ---------------------- + pg_table_is_visible +--------------------------------------------------------------------- f (1 row) @@ -98,15 +98,15 @@ CREATE INDEX test_index ON mx_hide_shard_names.test_table(id); \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ----------------------+--------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table ----------------------+--------------------+-------+----------+-------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 rows) @@ -114,22 +114,22 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; -- we should be able to select from the shards directly if we -- know the name of the tables SELECT count(*) FROM test_table_1130000; - count -------- + count 
+--------------------------------------------------------------------- 0 (1 row) -- disable the config so that table becomes visible SELECT pg_table_is_visible('test_table_1130000'::regclass); - pg_table_is_visible ---------------------- + pg_table_is_visible +--------------------------------------------------------------------- f (1 row) SET citus.override_table_visibility TO FALSE; SELECT pg_table_is_visible('test_table_1130000'::regclass); - pg_table_is_visible ---------------------- + pg_table_is_visible +--------------------------------------------------------------------- t (1 row) @@ -143,9 +143,9 @@ SET citus.replication_model TO 'streaming'; -- not existing shard ids appended to the distributed table name CREATE TABLE test_table_102008(id int, time date); SELECT create_distributed_table('test_table_102008', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -156,8 +156,8 @@ SET search_path TO 'mx_hide_shard_names'; -- name already exists :) CREATE TABLE test_table_2_1130000(id int, time date); SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ----------------------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres mx_hide_shard_names | test_table_1130000 | table | postgres @@ -166,8 +166,8 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; \d List of relations - Schema | Name | Type | Owner ----------------------+----------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres mx_hide_shard_names | test_table_2_1130000 | table | postgres @@ -182,17 +182,17 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id); \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ----------------------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres mx_hide_shard_names | test_table_1130000 | table | postgres @@ -200,36 +200,36 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; (4 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table ----------------------+--------------------+-------+----------+-------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 
rows) SET search_path TO 'mx_hide_shard_names_2'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ------------------------+--------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names_2 | test_table_1130008 | table | postgres mx_hide_shard_names_2 | test_table_1130010 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table ------------------------+--------------------+-------+----------+-------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008 mx_hide_shard_names_2 | test_index_1130010 | index | postgres | test_table_1130010 (2 rows) SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) -- now try very long table names @@ -244,24 +244,24 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names_3'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner ------------------------+-----------------------------------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130014 | table | postgres (2 rows) \d List of relations - Schema | Name | Type | Owner ------------------------+-------------------------------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres (1 row) @@ -276,38 +276,38 @@ CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id"); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port SET search_path TO "CiTuS.TeeN"; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner -------------+------------------------+-------+---------- + Schema | Name | Type | Owner 
+--------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table -------------+-----------------------+-------+----------+------------------------ + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016 CiTuS.TeeN | MyTenantIndex_1130018 | index | postgres | TeeNTabLE.1!?!_1130018 (2 rows) \d List of relations - Schema | Name | Type | Owner -------------+----------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres (1 row) \di List of relations - Schema | Name | Type | Owner | Table -------------+---------------+-------+----------+---------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?! (1 row) @@ -318,16 +318,16 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names'; \d List of relations - Schema | Name | Type | Owner ----------------------+-------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres (2 rows) \di List of relations - Schema | Name | Type | Owner | Table ----------------------+------------+-------+----------+------------ + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- mx_hide_shard_names | test_index | index | postgres | test_table (1 row) diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index 4bab9a2af..3198db3c4 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -2,8 +2,8 @@ -- Temporarily disable automatic 2PC recovery ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -32,103 +32,103 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_mx_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Verify that we've logged commit records SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- Confirm that the metadata transactions have been committed SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- Verify that the commit records have been removed SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) \c - - - 
:worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; - Column | Type | Definition ---------+------+------------ + Column | Type | Definition +--------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -142,17 +142,17 @@ CREATE TABLE should_not_exist ( value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ABORT; -- Verify that the table does not exist on the worker \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist'; - count -------- + 
count +--------------------------------------------------------------------- 0 (1 row) @@ -166,9 +166,9 @@ CREATE TABLE should_not_exist ( value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) PREPARE TRANSACTION 'this_should_fail'; @@ -184,9 +184,9 @@ CREATE TABLE objects_for_xacts ( name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -194,22 +194,22 @@ COMMIT; \c - - - :worker_1_port SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; - repmodel ----------- + repmodel +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; --- now show that we can rollback on creating mx table, but shards remain.... +-- now show that we can rollback on creating mx table, but shards remain.... BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; NOTICE: schema "citus_mx_schema_for_xacts" already exists, skipping @@ -221,31 +221,31 @@ CREATE TABLE objects_for_xacts2 ( name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts2', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- show that the table not exists on the coordinator SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) \c - - - :worker_1_port -- the distributed table not exists on the worker node SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -255,8 +255,8 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
-- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -277,28 +277,28 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -307,47 +307,47 @@ CREATE USER no_access_mx; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) SET ROLE no_access_mx; -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_distributed_table_metadata_from_workers('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['distributed_mx_table_some_val_seq']); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE SELECT master_drop_sequences(ARRAY['non_existing_schema.distributed_mx_table_some_val_seq']); - 
master_drop_sequences ------------------------ - + master_drop_sequences +--------------------------------------------------------------------- + (1 row) SELECT master_drop_sequences(ARRAY['']); @@ -355,9 +355,9 @@ ERROR: invalid name syntax SELECT master_drop_sequences(ARRAY['public.']); ERROR: invalid name syntax SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq_not_existing']); - master_drop_sequences ------------------------ - + master_drop_sequences +--------------------------------------------------------------------- + (1 row) -- make sure that we can drop unrelated tables/sequences @@ -365,18 +365,18 @@ CREATE TABLE unrelated_table(key serial); DROP TABLE unrelated_table; -- doesn't error out but it has no effect, so no need to error out SELECT master_drop_sequences(NULL); - master_drop_sequences ------------------------ - + master_drop_sequences +--------------------------------------------------------------------- + (1 row) \c - postgres - :master_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) @@ -391,22 +391,22 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_distributed_table_metadata_from_workers('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']); $$); - raise_failed_aclcheck ------------------------ - + raise_failed_aclcheck +--------------------------------------------------------------------- + (1 row) SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); @@ -421,10 +421,10 @@ DROP TABLE unrelated_table; \c - postgres - :worker_1_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- + Column | Type | Modifiers +--------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) @@ -432,8 +432,8 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx \c - postgres - :master_port ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications.out 
b/src/test/regress/expected/multi_mx_modifications.out index 1bb6b7a60..86132f12f 100644 --- a/src/test/regress/expected/multi_mx_modifications.out +++ b/src/test/regress/expected/multi_mx_modifications.out @@ -6,8 +6,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000; INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -16,8 +16,8 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -27,15 +27,15 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- and see all the inserted rows SELECT * FROM limit_orders_mx ORDER BY 1; - id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32745 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 @@ -43,8 +43,8 @@ SELECT * FROM limit_orders_mx ORDER BY 1; -- basic single-row INSERT with RETURNING INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 32746 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -52,8 +52,8 @@ INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT * FROM limit_orders_mx WHERE id = 12756; - id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 12756 | MSFT | 10959 | Wed May 08 07:29:23 2013 | sell | 0.00 (1 row) @@ -61,8 +61,8 @@ SELECT * FROM limit_orders_mx WHERE id = 12756; INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT * FROM limit_orders_mx WHERE id = 430; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) @@ -111,43 +111,43 @@ INSERT INTO limit_orders_mx SELECT * FROM deleted_orders; -- test simple DELETE INSERT INTO 
limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = 246; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- DELETE with expression in WHERE clause INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -169,45 +169,45 @@ INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'se -- simple UPDATE UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders_mx WHERE id = 246; - symbol --------- + symbol +--------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- expression UPDATE UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders_mx WHERE id = 246; - bidder_id ------------ + bidder_id +--------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- multi-column UPDATE UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246; - kind | limit_price -------+------------- + kind | limit_price +--------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price 
------+--------+-----------+--------------------------+------+------------- + id | symbol | bidder_id | placed_at | kind | limit_price +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -230,8 +230,8 @@ ERROR: relation bidders is not distributed WITH deleted_orders AS (INSERT INTO limit_orders_mx VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43)) UPDATE limit_orders_mx SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; - symbol | bidder_id ---------+----------- + symbol | bidder_id +--------------------------------------------------------------------- GM | 30 (1 row) @@ -242,15 +242,15 @@ UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; - symbol | bidder_id ---------+----------- + symbol | bidder_id +--------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; - id | lower | symbol ------+-------+-------- + id | lower | symbol +--------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -276,8 +276,8 @@ UPDATE limit_orders_mx SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders_mx WHERE id = 246; - array_of_values ------------------ + array_of_values +--------------------------------------------------------------------- {1,2} (1 row) @@ -287,8 +287,8 @@ CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint SELECT array_of_values FROM limit_orders_mx WHERE id = 246; - array_of_values ------------------ + array_of_values +--------------------------------------------------------------------- {1,2} (1 row) @@ -311,8 +311,8 @@ INSERT INTO multiple_hash_mx VALUES ('0', '4'); INSERT INTO multiple_hash_mx VALUES ('0', '5'); INSERT INTO multiple_hash_mx VALUES ('0', '6'); UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -322,8 +322,8 @@ UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; (6 rows) DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -347,8 +347,8 @@ INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *; - category | data -----------+------ + category | data +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -362,8 +362,8 @@ UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category; - category ----------- + category 
+--------------------------------------------------------------------- 1 1 1 @@ -372,8 +372,8 @@ UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING ca UPDATE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; - category | data -----------+--------- + category | data +--------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 @@ -388,8 +388,8 @@ DELETE FROM multiple_hash_mx WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; - category ----------- + category +--------------------------------------------------------------------- 1 1 1 @@ -398,13 +398,13 @@ DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; DELETE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; - category | data -----------+------ + category | data +--------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data; - category | data -----------+------ + category | data +--------------------------------------------------------------------- (0 rows) --- INSERT ... SELECT ... FROM commands are supported from workers @@ -430,33 +430,33 @@ SELECT minimum_value::bigint AS min_value, SELECT last_value FROM app_analytics_events_mx_id_seq \gset ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE; SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184); - setval ------------------- + setval +--------------------------------------------------------------------- 3940649673949184 (1 row) INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id ------------------- + id +--------------------------------------------------------------------- 3940649673949185 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id ------------------- + id +--------------------------------------------------------------------- 3940649673949186 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name -------------------+--------+------ + id | app_id | name +--------------------------------------------------------------------- 3940649673949187 | 103 | Mynt (1 row) -- clean up SELECT setval('app_analytics_events_mx_id_seq'::regclass, :last_value); - setval ------------------- + setval +--------------------------------------------------------------------- 4503599627370497 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out index b8febf421..fb2961934 100644 --- a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out +++ b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out @@ -8,36 +8,36 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node 
+--------------------------------------------------------------------- + (1 row) CREATE TABlE ref_table(id int, value_1 int); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABlE ref_table_2(id int, value_1 int); SELECT create_reference_table('ref_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_table_1(id int, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1 VALUES(5,5),(6,6); @@ -46,37 +46,37 @@ SET search_path TO 'mx_modify_reference_table'; -- Simple DML operations from the first worker node INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 3 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 2 (1 row) DELETE FROM ref_table; SELECT SUM(value_1) FROM ref_table; - sum ------ - + sum +--------------------------------------------------------------------- + (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 3 (1 row) -- Select For Update also follows the same logic with modification. -- It has been started to be supported on MX nodes with DML operations. SELECT * FROM ref_table FOR UPDATE; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -85,15 +85,15 @@ SELECT * FROM ref_table FOR UPDATE; -- queries are also supported on MX nodes. INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 14 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; - sum ------ + sum +--------------------------------------------------------------------- 14 (1 row) @@ -101,52 +101,52 @@ SELECT SUM(value_1) FROM ref_table_2; \c - - - :worker_2_port SET search_path TO 'mx_modify_reference_table'; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 14 (1 row) SELECT SUM(value_1) FROM ref_table_2; - sum ------ + sum +--------------------------------------------------------------------- 14 (1 row) --- Run basic queries from second worker node. These tests have been added +-- Run basic queries from second worker node. 
These tests have been added -- since locking logic is slightly different between running these commands -- from first worker node and the second one INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 17 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 15 (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 18 (1 row) INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; - sum ------ + sum +--------------------------------------------------------------------- 29 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; - sum ------ + sum +--------------------------------------------------------------------- 43 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index f8a9e03f2..0466c847e 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -13,8 +13,8 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name -------------- + name +--------------------------------------------------------------------- John Backus (1 row) @@ -25,8 +25,8 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name ----------------------- + name +--------------------------------------------------------------------- John Backus Worker 1 (1 row) @@ -37,8 +37,8 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name ----------------------- + name +--------------------------------------------------------------------- John Backus Worker 2 (1 row) @@ -48,8 +48,8 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name --------------- + name +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -59,8 +59,8 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name --------------- + name +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -70,8 +70,8 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name --------------- + name +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -84,8 +84,8 @@ SAVEPOINT hire_thompson; INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6; - name --------------- + name +--------------------------------------------------------------------- Ken Thompson (1 row) @@ -108,8 +108,8 @@ INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart'); 
ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 4; - name ----------- + name +--------------------------------------------------------------------- Jim Gray (1 row) @@ -130,8 +130,8 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;; - id | lab_id | name | id | name -----+--------+-------------------+----+------------ + id | lab_id | name | id | name +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -149,8 +149,8 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5; - id | lab_id | name | id | name -----+--------+-------------------+----+------------ + id | lab_id | name | id | name +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos @@ -171,8 +171,8 @@ BEGIN; SET LOCAL citus.enable_local_execution TO off; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); SELECT count(*) FROM researchers_mx WHERE lab_id = 6; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -187,8 +187,8 @@ COMMIT; BEGIN; \copy labs_mx from stdin delimiter ',' SELECT name FROM labs_mx WHERE id = 10; - name ------------------- + name +--------------------------------------------------------------------- Weyland-Yutani-1 Weyland-Yutani-2 (2 rows) @@ -201,12 +201,12 @@ BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" -DETAIL: Key (id)=(1) already exists. +DETAIL: Key (id)=(X) already exists. COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- same test on the second worker node @@ -216,13 +216,13 @@ BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" -DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57637 +DETAIL: Key (id)=(X) already exists. +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... 
SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- create trigger on one worker to reject certain values @@ -249,13 +249,13 @@ ERROR: illegal value COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- same failure test from worker 2 @@ -268,13 +268,13 @@ ERROR: illegal value COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port @@ -296,13 +296,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- same test from the other worker @@ -318,13 +318,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- what if the failures happen at COMMIT time? 
@@ -342,19 +342,19 @@ INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 -WARNING: could not commit transaction for shard 1220103 on any active node -WARNING: could not commit transaction for shard 1220102 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) DROP TRIGGER reject_bad_mx ON labs_mx_1220102; @@ -370,19 +370,19 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 -WARNING: could not commit transaction for shard 1220103 on any active node -WARNING: could not commit transaction for shard 1220102 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails? 
@@ -395,18 +395,18 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 -WARNING: could not commit transaction for shard 1220103 on any active node -WARNING: could not commit transaction for shard 1220102 on any active node +WARNING: failed to commit transaction on localhost:xxxxx +WARNING: could not commit transaction for shard xxxxx on any active node +WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- no data should persists SELECT * FROM objects_mx WHERE id = 1; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index c5b5718a1..b44d69a84 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -22,75 +22,75 @@ $$; -- add a node to the cluster SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57637 | f | f (1 row) -- create couple of tables CREATE TABLE ref_table(a int primary key); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE dist_table_1(a int primary key, b int references ref_table(a)); SELECT create_distributed_table('dist_table_1', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- update the node SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node), 'localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57638 | f | f (1 row) -- start syncing metadata to the node SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that maintenance daemon syncs after master_update_node --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Update the node again. We do this as epeatable read, so we just see the -- changes by master_update_node(). This is to avoid inconsistent results -- if the maintenance daemon does the metadata sync too fast. BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57637 | t | f (1 row) @@ -98,20 +98,20 @@ END; -- wait until maintenance daemon does the next metadata sync, and then -- check if metadata is synced again SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | t (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) @@ -119,93 +119,93 @@ SELECT verify_metadata('localhost', :worker_1_port); -- a unwriteable node. BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 57637 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- + nodeid | nodename | nodeport | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | localhost | 12345 | t | f (1 row) END; -- maintenace daemon metadata sync should fail, because node is still unwriteable. SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | f (1 row) -- update it back to :worker_1_port, now metadata should be synced SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test updating a node when another node is in readonly-mode --------------------------------------------------------------------------- +--------------------------------------------------------------------- SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- Create a table with shards on both nodes CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO dist_table_2 SELECT i FROM generate_series(1, 100) i; SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); - mark_node_readonly --------------------- + mark_node_readonly +--------------------------------------------------------------------- t (1 row) -- Now updating the other node will mark worker 2 as not synced. BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | f 3 | t | f (2 rows) @@ -214,312 +214,312 @@ COMMIT; -- worker_2 is out of sync, so further updates aren't sent to it and -- we shouldn't see the warnings. SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | f 3 | t | f (2 rows) -- Make the node writeable. SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); - mark_node_readonly --------------------- + mark_node_readonly +--------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) -- Mark the node readonly again, so the following master_update_node warns SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); - mark_node_readonly --------------------- + mark_node_readonly +--------------------------------------------------------------------- t (1 row) -- Revert the nodeport of worker 1. BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM dist_table_2; - count -------- + count +--------------------------------------------------------------------- 100 (1 row) END; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) -- Make the node writeable. SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); - mark_node_readonly --------------------- + mark_node_readonly +--------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata ------------------+----------------- + verify_metadata | verify_metadata +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that master_update_node rolls back properly --------------------------------------------------------------------------- +--------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) ROLLBACK; SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata ------------------+----------------- + verify_metadata | verify_metadata +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that master_update_node can appear in a prepared transaction. --------------------------------------------------------------------------- +--------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | f 3 | t | t (2 rows) BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- + nodeid | hasmetadata | metadatasynced +--------------------------------------------------------------------- 2 | t | t 3 | t | t (2 rows) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata ------------------+----------------- + verify_metadata | verify_metadata +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that changes in isactive is propagated to the metadata nodes --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Don't drop the reference table so it has shards on the nodes being disabled DROP TABLE dist_table_1, dist_table_2; SELECT 1 FROM master_disable_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test master_disable_node() when the node that is being disabled is actually down ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- SELECT master_update_node(:nodeid_2, 'localhost', 1); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) -- set metadatasynced so we try porpagating metadata changes UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out SELECT 1 FROM master_disable_node('localhost', 1); -ERROR: Disabling localhost:1 failed -DETAIL: connection error: localhost:1 +ERROR: Disabling localhost:xxxxx failed +DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_disable_node('localhost', 1); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) SELECT master_update_node(:nodeid_2, 'localhost', :worker_2_port); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test master_disable_node() when the other node is down ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- node 1 is down. 
SELECT master_update_node(:nodeid_1, 'localhost', 1); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) -- set metadatasynced so we try porpagating metadata changes UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out SELECT 1 FROM master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:57638 failed -DETAIL: connection error: localhost:1 +ERROR: Disabling localhost:xxxxx failed +DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_disable_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- bring up node 1 SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port); - master_update_node --------------------- - + master_update_node +--------------------------------------------------------------------- + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata ------------------ + verify_metadata +--------------------------------------------------------------------- t (1 row) @@ -527,8 +527,8 @@ SELECT verify_metadata('localhost', :worker_1_port); DROP TABLE ref_table; TRUNCATE pg_dist_colocation; SELECT count(*) FROM (SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node) t; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_mx_partitioning.out b/src/test/regress/expected/multi_mx_partitioning.out index 3554bb303..b985bc146 100644 --- a/src/test/regress/expected/multi_mx_partitioning.out +++ b/src/test/regress/expected/multi_mx_partitioning.out @@ -7,9 +7,9 @@ SET citus.shard_replication_factor TO 1; -- make sure wen can create partitioning tables in MX SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- 1-) Distributing partitioned table @@ -27,16 +27,16 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... 
NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- see from MX node, the data is loaded to shards \c - - - :worker_1_port SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -51,8 +51,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2009 partitioning_test_2010 @@ -66,8 +66,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 partitioning_test_2010 | 4 @@ -75,8 +75,8 @@ ORDER BY -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid ------------------------- + inhrelid +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 (2 rows) @@ -95,8 +95,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2011 (2 rows) @@ -109,16 +109,16 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 (2 rows) -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid ------------------------- + inhrelid +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ -143,8 +143,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2012 (2 rows) @@ -157,16 +157,16 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 (2 rows) -- see from MX node, see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -177,8 +177,8 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid ------------------------- + inhrelid +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ 
-191,9 +191,9 @@ SET citus.shard_replication_factor TO 1; -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- load some data @@ -203,8 +203,8 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES -- see from MX node, see the data is loaded to shards \c - - - :worker_1_port SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -217,8 +217,8 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid ------------------------- + inhrelid +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ -232,8 +232,8 @@ ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see from MX node, partitioning hierarchy is built \c - - - :worker_1_port SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid ------------------------- + inhrelid +--------------------------------------------------------------------- partitioning_test_2010 partitioning_test_2011 partitioning_test_2012 @@ -247,21 +247,21 @@ HINT: Connect to the coordinator and run it again. \c - - - :master_port -- make sure we can repeatedly call start_metadata_sync_to_node SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- make sure we can drop partitions @@ -278,9 +278,9 @@ CREATE SCHEMA partition_test; SET SEARCH_PATH TO partition_test; CREATE TABLE partition_parent_table(a int, b int, c int) PARTITION BY RANGE (b); SELECT create_distributed_table('partition_parent_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partition_0 PARTITION OF partition_parent_table FOR VALUES FROM (1) TO (10); diff --git a/src/test/regress/expected/multi_mx_reference_table.out b/src/test/regress/expected/multi_mx_reference_table.out index 6dc428df8..a84ea15e5 100644 --- a/src/test/regress/expected/multi_mx_reference_table.out +++ b/src/test/regress/expected/multi_mx_reference_table.out @@ -2,9 +2,9 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; \c - - - :master_port CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT 
create_reference_table('reference_table_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); @@ -14,15 +14,15 @@ INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); -- SELECT .. FOR UPDATE should work on coordinator (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -30,15 +30,15 @@ END; \c - - - :worker_1_port -- SELECT .. FOR UPDATE should work on first worker (takes lock on self) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -48,8 +48,8 @@ SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -63,8 +63,8 @@ FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -75,8 +75,8 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 3; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -90,8 +90,8 @@ WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; - value_1 | value_3 ----------+--------- + value_1 | value_3 +--------------------------------------------------------------------- 4 | 4 5 | 5 (2 rows) @@ -103,8 +103,8 @@ FROM ORDER BY 2 ASC LIMIT 2; - value_1 | ?column? ----------+---------- + value_1 | ?column? +--------------------------------------------------------------------- 1 | 15 2 | 30 (2 rows) @@ -115,8 +115,8 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; - value_1 | ?column? ----------+---------- + value_1 | ?column? 
+--------------------------------------------------------------------- 3 | 45 4 | 60 (2 rows) @@ -127,8 +127,8 @@ FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -139,8 +139,8 @@ FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; - value_2 | value_4 ----------+--------- + value_2 | value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -149,8 +149,8 @@ FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -164,8 +164,8 @@ WHERE value_3 = '2' OR value_1 = 3 ) AND FALSE; - value_2 | value_4 ----------+--------- + value_2 | value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -181,8 +181,8 @@ WHERE reference_table_test ) AND value_1 < 3; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -196,8 +196,8 @@ WHERE ( '1', '2' ); - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) @@ -211,8 +211,8 @@ WHERE ( '5', '2' ); - date_part ------------ + date_part +--------------------------------------------------------------------- 2 5 (2 rows) @@ -223,8 +223,8 @@ FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; - value_4 ---------- + value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -233,8 +233,8 @@ FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) @@ -245,8 +245,8 @@ FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); - value_4 ---------- + value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -255,8 +255,8 @@ FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -268,8 +268,8 @@ FROM reference_table_test WHERE FALSE; - value_1 ---------- + value_1 +--------------------------------------------------------------------- (0 rows) SELECT @@ -278,8 +278,8 @@ FROM reference_table_test WHERE int4eq(1, 2); - value_1 ---------- + value_1 +--------------------------------------------------------------------- (0 rows) -- rename output name and do some operations @@ -287,8 +287,8 @@ SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; - id | age -----+----- + id | age +--------------------------------------------------------------------- 1 | 15 2 | 30 3 | 45 @@ -302,8 +302,8 @@ SELECT * FROM some_data; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 
+--------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 @@ -312,8 +312,8 @@ FROM -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -324,8 +324,8 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- + value_1 | value_2 | value_3 | value_4 | position +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -335,8 +335,8 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; - value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- + value_1 | value_2 | value_3 | value_4 | position +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) @@ -348,8 +348,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -360,8 +360,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -371,8 +371,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) -- to make the tests more interested for aggregation tests, ingest some more data @@ -392,8 +392,8 @@ HAVING SUM(value_2) > 3 ORDER BY 1; - value_4 | sum ---------------------------+----- + value_4 | sum +--------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 @@ -409,8 +409,8 @@ FROM GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; - value_4 | value_3 | sum ---------------------------+---------+----- + value_4 | value_3 | sum +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 @@ -430,8 +430,8 @@ FROM reference_table_test ORDER BY 1; - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 
00:00:00 2016 @@ -444,8 +444,8 @@ SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; - value_4 | rank ---------------------------+------ + value_4 | rank +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 @@ -461,8 +461,8 @@ SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; - value_4 | avg ---------------------------+------------------------ + value_4 | avg +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 @@ -484,8 +484,8 @@ SELECT END) as c FROM reference_table_test; - c ---- + c +--------------------------------------------------------------------- 3 (1 row) @@ -505,8 +505,8 @@ SELECT value_1 ORDER BY 1; - value_1 | c ----------+--- + value_1 | c +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 1 @@ -517,8 +517,8 @@ SELECT -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -530,8 +530,8 @@ SELECT * FROM reference_table_test; (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -545,27 +545,27 @@ DECLARE test_cursor CURSOR FOR WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (3 rows) FETCH test_cursor; -- fetch one row after the last - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -580,16 +580,16 @@ CREATE TEMP TABLE temp_reference_test as -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); - create_reference_table ------------------------- - + 
create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- ingest some data to both tables @@ -601,15 +601,15 @@ INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_2_port -- SELECT .. FOR UPDATE should work on second worker (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -623,8 +623,8 @@ WHERE t1.value_2 = t2.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -638,8 +638,8 @@ WHERE t1.value_2 = t3.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 4 5 (2 rows) @@ -652,8 +652,8 @@ WHERE t2.value_2 = t3.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- (0 rows) -- join on different columns and different data types via casts @@ -665,8 +665,8 @@ WHERE t1.value_2 = t2.value_1 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -680,8 +680,8 @@ WHERE t1.value_2 = t2.value_3::int ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -695,8 +695,8 @@ WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -714,8 +714,8 @@ WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -728,8 +728,8 @@ WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -741,8 +741,8 @@ FROM JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -754,8 +754,8 @@ FROM LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -770,10 +770,10 @@ FROM RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 - + (2 rows) \c - - - :master_port @@ -782,16 +782,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); 
SELECT create_distributed_table('colocated_table_test', 'value_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DELETE FROM reference_table_test; @@ -812,8 +812,8 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -826,8 +826,8 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -840,8 +840,8 @@ WHERE reference_table_test.value_1 = colocated_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -855,8 +855,8 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 1 2 @@ -872,8 +872,8 @@ WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -887,8 +887,8 @@ WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -901,8 +901,8 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) diff --git a/src/test/regress/expected/multi_mx_repartition_join_w1.out b/src/test/regress/expected/multi_mx_repartition_join_w1.out index f0057e5fc..c3072c5f3 100644 --- a/src/test/regress/expected/multi_mx_repartition_join_w1.out +++ b/src/test/regress/expected/multi_mx_repartition_join_w1.out @@ -1,5 +1,5 @@ -- Test two concurrent reparttition joins from two different workers --- This test runs the below query from the 
:worker_1_port and the +-- This test runs the below query from the :worker_1_port and the -- concurrent test runs the same query on :worker_2_port. Note that, both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_1_port diff --git a/src/test/regress/expected/multi_mx_repartition_join_w2.out b/src/test/regress/expected/multi_mx_repartition_join_w2.out index 4913108fa..a643cd90d 100644 --- a/src/test/regress/expected/multi_mx_repartition_join_w2.out +++ b/src/test/regress/expected/multi_mx_repartition_join_w2.out @@ -1,5 +1,5 @@ -- Test two concurrent reparttition joins from two different workers --- This test runs the below query from the :worker_2_port and the +-- This test runs the below query from the :worker_2_port and the -- concurrent test runs the same query on :worker_1_port. Note that, both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_2_port diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out index b8760055d..143f0a94d 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out @@ -127,16 +127,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 3; SELECT create_distributed_table('repartition_udt', 'pk'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); @@ -157,8 +157,8 @@ SET citus.task_executor_type = 'task-tracker'; -- join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- (0 rows) -- Query that should result in a repartition join on UDT column. 
@@ -167,8 +167,8 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - QUERY PLAN --------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries @@ -185,8 +185,8 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w1.out b/src/test/regress/expected/multi_mx_repartition_udt_w1.out index 934d94725..2095866e8 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_w1.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w1.out @@ -11,8 +11,8 @@ SET citus.log_multi_join_order = true; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other @@ -20,8 +20,8 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w2.out b/src/test/regress/expected/multi_mx_repartition_udt_w2.out index c35b003ff..547c62c5b 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_w2.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w2.out @@ -11,8 +11,8 @@ SET citus.log_multi_join_order = true; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other @@ -20,8 +20,8 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol 
-----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 885ab82ba..3e2cd40ff 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -73,8 +73,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - id | author_id | title | word_count -----+-----------+-----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -83,8 +83,8 @@ SELECT title FROM articles_hash_mx WHERE author_id = 10; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title ------------- + title +--------------------------------------------------------------------- aggrandize absentness andelee @@ -99,8 +99,8 @@ SELECT title, word_count FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title | word_count -------------+------------ + title | word_count +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -116,8 +116,8 @@ SELECT title, id FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - title | id ----------+---- + title | id +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -129,8 +129,8 @@ SELECT title, author_id FROM articles_hash_mx ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -148,8 +148,8 @@ SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 agatized | 8 auriga | 7 @@ -171,8 +171,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 10 | 59955 8 | 55410 7 | 36756 @@ -188,8 +188,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 1 | 35894 (1 row) @@ -197,8 +197,8 @@ DETAIL: distribution column value: 1 -- not router-plannable due to <= and IN SELECT * FROM articles_hash_mx WHERE author_id <= 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | 
word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -209,8 +209,8 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -229,8 +229,8 @@ SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -244,8 +244,8 @@ SELECT title FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - title --------------- + title +--------------------------------------------------------------------- arsenous alamo arcading @@ -260,8 +260,8 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | id | title -----+-----------+----+-------------- + id | author_id | id | title +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -274,8 +274,8 @@ id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) -- CTE joins on different workers are supported because they are both planned recursively @@ -283,19 +283,19 @@ WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: cannot run command which targets multiple shards -DEBUG: generating subplan 66_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 -DEBUG: generating subplan 66_2 for CTE id_title: SELECT id, title FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_2 for CTE id_title: SELECT id, title FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 -DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT 
intermediate_result.id, intermediate_result.title FROM read_intermediate_result('66_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) -- recursive CTEs are supported when filtered on partition column @@ -346,8 +346,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -392,8 +392,8 @@ SELECT ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable - id | subtitle | count -----+----------+------- + id | subtitle | count +--------------------------------------------------------------------- 1 | | 1 3 | | 1 11 | | 1 @@ -429,8 +429,8 @@ SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -441,8 +441,8 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 @@ -459,8 +459,8 @@ DEBUG: Plan is router executable SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | author_id | title | word_count | position -----+-----------+------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 12 | 2 | archiblast | 18185 | 3 42 | 2 | ausable | 15885 | 3 2 | 2 | abducing | 13642 | 3 @@ -474,12 +474,12 @@ FROM articles_hash_mx, (SELECT id, word_count FROM 
articles_hash_mx) AS test WHE ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 85_1 for subquery SELECT id, word_count FROM public.articles_hash_mx -DEBUG: Plan 85 query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('85_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5 DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -493,13 +493,13 @@ WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1 ORDER BY articles_hash_mx.id; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 87_1 for subquery SELECT id, word_count FROM public.articles_hash_mx -DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('87_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) AND (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash_mx.id +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) AND (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash_mx.id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -521,8 +521,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | 
alamo | 1347 21 | 1 | arcading | 5890 @@ -536,8 +536,8 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -551,8 +551,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -567,8 +567,8 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value -------------+-------------- + article_id | random_value +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -584,8 +584,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -600,8 +600,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -617,15 +617,15 @@ SELECT a.author_id as first_author, b.word_count as second_word_count LIMIT 3; DEBUG: Found no worker with all shard placements DEBUG: found no worker with all shard placements -DEBUG: generating subplan 96_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash_mx +DEBUG: generating subplan XXX_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT a.author_id AS first_author, b.word_count AS second_word_count FROM public.articles_hash_mx a, (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) b WHERE ((a.author_id OPERATOR(pg_catalog.=) 2) AND (a.author_id OPERATOR(pg_catalog.=) b.author_id)) LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a.author_id AS first_author, b.word_count AS second_word_count FROM public.articles_hash_mx a, (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count 
integer)) b WHERE ((a.author_id OPERATOR(pg_catalog.=) 2) AND (a.author_id OPERATOR(pg_catalog.=) b.author_id)) LIMIT 3 DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- (0 rows) -- single shard select with limit is router plannable @@ -636,8 +636,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -652,8 +652,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -668,8 +668,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -683,8 +683,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -700,8 +700,8 @@ SELECT distinct id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -716,8 +716,8 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg --------------------- + avg +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -729,8 +729,8 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt --------+------+-------+----- + max | min | sum | cnt +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -742,8 +742,8 @@ SELECT max(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - max -------- + max +--------------------------------------------------------------------- 11814 (1 row) @@ -756,8 +756,8 @@ SELECT * FROM ( ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -775,8 +775,8 @@ UNION (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- a 
(1 row) @@ -785,8 +785,8 @@ INTERSECT (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- a (1 row) @@ -798,8 +798,8 @@ SELECT * FROM ( ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- al ar at @@ -812,11 +812,11 @@ SET client_min_messages TO DEBUG1; UNION (SELECT * FROM articles_hash_mx WHERE author_id = 2) ORDER BY 1,2; -DEBUG: generating subplan 110_1 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 110_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 110 query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 - id | author_id | title | word_count -----+-----------+--------------+------------ +DEBUG: generating subplan XXX_1 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -836,8 +836,8 @@ SELECT * FROM ( ORDER BY 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -857,8 +857,8 @@ SELECT * FROM articles_hash_mx WHERE author_id >= 1 AND author_id <= 3 ORDER BY 1,2,3,4; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | 
asternal | 10480 @@ -887,8 +887,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -901,8 +901,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -917,8 +917,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -930,8 +930,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -939,8 +939,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -953,8 +953,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -969,8 +969,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -983,8 +983,8 @@ SELECT * FROM articles_hash_mx WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -997,8 +997,8 @@ SELECT * FROM articles_hash_mx WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | 
arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1013,8 +1013,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1023,8 +1023,8 @@ SELECT * FROM articles_hash_mx WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1039,8 +1039,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1055,8 +1055,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1068,8 +1068,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1081,8 +1081,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1094,8 +1094,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1110,8 +1110,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1124,8 +1124,8 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | 
antehall | 7707 @@ -1140,8 +1140,8 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1155,8 +1155,8 @@ SELECT id, MIN(id) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | min -----+----- + id | min +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1170,8 +1170,8 @@ SELECT id, word_count, AVG(word_count) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count | avg -----+------------+----------------------- + id | word_count | avg +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1185,8 +1185,8 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - word_count | rank -------------+------ + word_count | rank +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1224,8 +1224,8 @@ SELECT DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - c ---- + c +--------------------------------------------------------------------- 5 (1 row) @@ -1245,8 +1245,8 @@ SELECT author_id ORDER BY c; DEBUG: Router planner cannot handle multi-shard select queries - c ---- + c +--------------------------------------------------------------------- 4 5 5 @@ -1268,8 +1268,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1289,20 +1289,20 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH test_cursor; - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 (1 row) FETCH BACKWARD test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1337,8 +1337,8 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count | count --------+------- + count | count +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -1347,8 +1347,8 @@ SELECT count(*), 
count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; DEBUG: Router planner cannot handle multi-shard select queries - count | count --------+------- + count | count +--------------------------------------------------------------------- 10 | 2 (1 row) @@ -1361,8 +1361,8 @@ EXECUTE author_1_articles; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1379,8 +1379,8 @@ EXECUTE author_articles(1); DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1409,8 +1409,8 @@ DETAIL: distribution column value: 1 CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1437,8 +1437,8 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1453,8 +1453,8 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_mx; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1487,8 +1487,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index afb5f687e..30fbffc5a 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -5,44 +5,44 @@ \c - - - :worker_1_port -- test very basic queries SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) -- test cursors SET search_path TO public; BEGIN; -DECLARE test_cursor CURSOR FOR +DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+--------+-------------+----------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -50,24 +50,24 @@ END; -- test with search_path is set SET search_path TO citus_mx_test_schema; BEGIN; -DECLARE test_cursor CURSOR FOR +DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+--------+-------------+----------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -77,9 +77,9 @@ SET search_path TO public; INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3); -- verify insertion SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- - 100 | TURKEY | 3 | + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- + 100 | TURKEY | 3 | (1 row) -- test with search_path is set @@ -87,9 +87,9 @@ SET search_path TO citus_mx_test_schema; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 101; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- - 101 | GERMANY | 3 | + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- + 101 | GERMANY | 3 | (1 row) -- TODO: add UPDATE/DELETE/UPSERT @@ -97,8 +97,8 @@ SELECT * FROM nation_hash WHERE n_nationkey = 101; SET search_path TO public; -- UDF in public, table in a schema other than public, search_path is not set SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction --------------------- + simpletestfunction +--------------------------------------------------------------------- 152 151 37 @@ -109,8 +109,8 @@ SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_has -- UDF in public, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction --------------------- + simpletestfunction +--------------------------------------------------------------------- 152 151 37 @@ -121,8 +121,8 @@ SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nat -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction2 ---------------------- + simpletestfunction2 +--------------------------------------------------------------------- 152 151 37 @@ -133,8 +133,8 @@ SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_ -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - 
simpletestfunction2 ---------------------- + simpletestfunction2 +--------------------------------------------------------------------- 152 151 37 @@ -146,75 +146,75 @@ SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER SET search_path TO public; -- test with search_path is not set SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY 1; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 0 | ven packages wake quickly. regu (6 rows) SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english; - n_comment -------------------------------------------------------------------------------------------------------------- + n_comment +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d - y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special (6 rows) SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; - n_comment -------------------------------------------------------------------------------------------------------------- + n_comment +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d - y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC; - n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- + n_nationkey | n_name | n_regionkey | n_comment | test_col +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) --test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC; - n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- + n_nationkey | n_name | n_regionkey | n_comment | test_col +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) @@ -222,14 +222,14 @@ SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composit -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; -SELECT +SELECT count (*) FROM - citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 + citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -237,14 +237,14 @@ WHERE -- join of two tables which are in different schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; -SELECT +SELECT count (*) FROM - nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 + nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -252,14 +252,14 @@ WHERE -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; -SELECT +SELECT count (*) FROM - citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 + citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -267,14 +267,14 @@ WHERE -- join of two tables which are in same schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; -SELECT +SELECT count (*) FROM - nation_hash n1, nation_hash_2 n2 + nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -290,8 +290,8 @@ FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -305,8 +305,8 @@ FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -320,12 +320,12 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) --- hash repartition joins +-- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column @@ -336,8 +336,8 @@ FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 125 (1 row) @@ -351,8 +351,8 @@ FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 125 (1 row) @@ -366,8 +366,8 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 125 (1 row) @@ -388,9 
+388,9 @@ SET citus.replication_model TO 'streaming'; SET search_path TO mx_ddl_schema_1; CREATE TABLE table_1 (key int PRIMARY KEY, value text); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX i1 ON table_1(value); @@ -399,19 +399,19 @@ CREATE INDEX CONCURRENTLY i2 ON table_1(value); SET search_path TO mx_ddl_schema_1, mx_ddl_schema_2; CREATE TABLE mx_ddl_schema_2.table_2 (key int PRIMARY KEY, value text); SELECT create_distributed_table('mx_ddl_schema_2.table_2', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE table_2 ADD CONSTRAINT test_constraint FOREIGN KEY (key) REFERENCES table_1(key); --- we can also handle schema/table names with quotation +-- we can also handle schema/table names with quotation SET search_path TO "CiTuS.TeAeN"; CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); SELECT create_distributed_table('"TeeNTabLE.1!?!"', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE INDEX "MyTenantIndex" ON "CiTuS.TeAeN"."TeeNTabLE.1!?!"("TeNANt_Id"); @@ -430,7 +430,7 @@ ALTER TABLE "TeeNTabLE.1!?!" ADD COLUMN new_col INT; -- test with a public schema is in the search path SET search_path TO public, "CiTuS.TeAeN"; ALTER TABLE "TeeNTabLE.1!?!" DROP COLUMN new_col; --- make sure that we handle transaction blocks properly +-- make sure that we handle transaction blocks properly BEGIN; SET search_path TO public, "CiTuS.TeAeN"; ALTER TABLE "TeeNTabLE.1!?!" 
ADD COLUMN new_col INT; diff --git a/src/test/regress/expected/multi_mx_tpch_query1.out b/src/test/regress/expected/multi_mx_tpch_query1.out index 423be6399..567586303 100644 --- a/src/test/regress/expected/multi_mx_tpch_query1.out +++ b/src/test/regress/expected/multi_mx_tpch_query1.out @@ -25,8 +25,8 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -57,8 +57,8 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -89,8 +89,8 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 diff --git a/src/test/regress/expected/multi_mx_tpch_query10.out b/src/test/regress/expected/multi_mx_tpch_query10.out index fdd243e15..0ef4049aa 100644 --- a/src/test/regress/expected/multi_mx_tpch_query10.out +++ 
b/src/test/regress/expected/multi_mx_tpch_query10.out @@ -1,7 +1,7 @@ -- -- MULTI_MX_TPCH_QUERY10 -- --- Query #10 from the TPC-H decision support benchmark. +-- Query #10 from the TPC-H decision support benchmark. -- connect to master \c - - - :master_port SELECT @@ -36,18 +36,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. 
silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote @@ -94,18 +94,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. 
silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote @@ -152,18 +152,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. 
silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote diff --git a/src/test/regress/expected/multi_mx_tpch_query12.out b/src/test/regress/expected/multi_mx_tpch_query12.out index 013135b27..efe3361d4 100644 --- a/src/test/regress/expected/multi_mx_tpch_query12.out +++ b/src/test/regress/expected/multi_mx_tpch_query12.out @@ -32,8 +32,8 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- + l_shipmode | high_line_count | low_line_count +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) @@ -69,8 +69,8 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- + l_shipmode | high_line_count | low_line_count +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) @@ -106,8 +106,8 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- + l_shipmode | high_line_count | low_line_count +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) diff --git a/src/test/regress/expected/multi_mx_tpch_query14.out b/src/test/regress/expected/multi_mx_tpch_query14.out index f613da09b..5cf6e71d2 100644 --- a/src/test/regress/expected/multi_mx_tpch_query14.out +++ b/src/test/regress/expected/multi_mx_tpch_query14.out @@ -17,8 +17,8 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue ---------------------- + promo_revenue +--------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -38,8 +38,8 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue ---------------------- + promo_revenue +--------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -59,8 +59,8 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue ---------------------- + promo_revenue +--------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query19.out b/src/test/regress/expected/multi_mx_tpch_query19.out index ee1295dee..cb0625f1b 100644 --- a/src/test/regress/expected/multi_mx_tpch_query19.out +++ b/src/test/regress/expected/multi_mx_tpch_query19.out @@ -34,8 +34,8 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue -------------- + revenue +--------------------------------------------------------------------- 144747.0857 (1 row) @@ -72,8 +72,8 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue -------------- + revenue +--------------------------------------------------------------------- 144747.0857 (1 row) @@ -110,8 +110,8 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue -------------- + revenue +--------------------------------------------------------------------- 144747.0857 (1 row) diff --git 
a/src/test/regress/expected/multi_mx_tpch_query3.out b/src/test/regress/expected/multi_mx_tpch_query3.out index 45559e6b0..ab151aff0 100644 --- a/src/test/regress/expected/multi_mx_tpch_query3.out +++ b/src/test/regress/expected/multi_mx_tpch_query3.out @@ -1,7 +1,7 @@ -- -- MULTI_MX_TPCH_QUERY3 -- --- Query #3 from the TPC-H decision support benchmark. +-- Query #3 from the TPC-H decision support benchmark. -- connect to the coordinator \c - - - :master_port SELECT @@ -26,8 +26,8 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- + l_orderkey | revenue | o_orderdate | o_shippriority +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -70,8 +70,8 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- + l_orderkey | revenue | o_orderdate | o_shippriority +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -114,8 +114,8 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- + l_orderkey | revenue | o_orderdate | o_shippriority +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 diff --git a/src/test/regress/expected/multi_mx_tpch_query6.out b/src/test/regress/expected/multi_mx_tpch_query6.out index 8bdc4c86e..68e2f40e4 100644 --- a/src/test/regress/expected/multi_mx_tpch_query6.out +++ b/src/test/regress/expected/multi_mx_tpch_query6.out @@ -13,8 +13,8 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue -------------- + revenue +--------------------------------------------------------------------- 243277.7858 (1 row) @@ -30,8 +30,8 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue -------------- + revenue +--------------------------------------------------------------------- 243277.7858 (1 row) @@ -47,8 +47,8 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue -------------- + revenue +--------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7.out b/src/test/regress/expected/multi_mx_tpch_query7.out index ac2e0b2e5..263680d4a 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7.out +++ b/src/test/regress/expected/multi_mx_tpch_query7.out @@ -43,8 +43,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -90,8 +90,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue 
----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -137,8 +137,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7_nested.out b/src/test/regress/expected/multi_mx_tpch_query7_nested.out index 9271fb615..a95f89f26 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_mx_tpch_query7_nested.out @@ -22,18 +22,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -52,8 +52,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -78,18 +78,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -108,8 +108,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -134,18 +134,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -164,8 +164,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_transaction_recovery.out b/src/test/regress/expected/multi_mx_transaction_recovery.out index 1e49ad878..6ef2ba445 100644 --- a/src/test/regress/expected/multi_mx_transaction_recovery.out +++ b/src/test/regress/expected/multi_mx_transaction_recovery.out @@ -4,31 +4,31 @@ SET 
citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port -- Disable auto-recovery for the initial tests ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_commit_protocol TO '2pc'; -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -36,8 +36,8 @@ SELECT count(*) FROM pg_dist_transaction; -- different result and the prepared statement names should be adapted -- accordingly. SELECT * FROM pg_dist_local_group; - groupid ---------- + groupid +--------------------------------------------------------------------- 14 (1 row) @@ -60,42 +60,42 @@ INSERT INTO pg_dist_transaction VALUES (14, 'citus_14_should_commit'); INSERT INTO pg_dist_transaction VALUES (14, 'citus_14_should_be_forgotten'); INSERT INTO pg_dist_transaction VALUES (122, 'citus_122_should_do_nothing'); SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 3 (1 row) -- delete the citus_122_should_do_nothing transaction DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING *; - groupid | gid ----------+----------------------------- + groupid | gid +--------------------------------------------------------------------- 122 | citus_122_should_do_nothing (1 row) ROLLBACK PREPARED 'citus_122_should_do_nothing'; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_abort'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -106,14 +106,14 @@ INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -126,64 +126,64 @@ INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT 
count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- Committed COPY should write 3 transaction records (2 fall into the same shard) COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) -- Test whether auto-recovery runs ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_truncate_from_worker.out b/src/test/regress/expected/multi_mx_truncate_from_worker.out index 705968b22..dc255f01c 100644 --- a/src/test/regress/expected/multi_mx_truncate_from_worker.out +++ b/src/test/regress/expected/multi_mx_truncate_from_worker.out @@ -7,16 +7,16 @@ SET citus.shard_count TO 6; SET citus.replication_model TO streaming; CREATE TABLE "refer'ence_table"(id int PRIMARY KEY); SELECT create_reference_table('refer''ence_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES "refer'ence_table"(id) ON UPDATE CASCADE; @@ -25,8 +25,8 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 -- first, make sure that truncate from the coordinator workers as expected TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -36,14 +36,14 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate 
cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -55,8 +55,8 @@ BEGIN; ALTER TABLE on_update_fkey_table ADD COLUMN x INT; TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -66,8 +66,8 @@ SET search_path TO 'truncate_from_workers'; -- make sure that TRUNCATE workes expected from the worker node TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -77,14 +77,14 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -114,15 +114,15 @@ SET search_path TO 'truncate_from_workers'; BEGIN; INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000) i; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 1001 (1 row) TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -137,8 +137,8 @@ ERROR: lock_relation_if_exists can only be used in transaction blocks BEGIN; -- should fail since the schema is not provided SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- f (1 row) @@ -147,8 +147,8 @@ BEGIN; -- should work since the schema is in the search path SET search_path TO 'truncate_from_workers'; SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) @@ -156,8 +156,8 @@ ROLLBACK; BEGIN; -- should return false since there is no such table SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_tableXXX', 'ACCESS SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- f (1 row) @@ -170,63 +170,63 @@ ROLLBACK; BEGIN; -- test all lock levels SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT 
lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE UPDATE EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS EXCLUSIVE'); - lock_relation_if_exists -------------------------- + lock_relation_if_exists +--------------------------------------------------------------------- t (1 row) -- see them all SELECT relation::regclass, mode FROM pg_locks WHERE pid = pg_backend_pid() AND relation = 'truncate_from_workers.on_update_fkey_table'::regclass ORDER BY 2 DESC; - relation | mode ---------------------------------------------+-------------------------- + relation | mode +--------------------------------------------------------------------- truncate_from_workers.on_update_fkey_table | ShareUpdateExclusiveLock truncate_from_workers.on_update_fkey_table | ShareRowExclusiveLock truncate_from_workers.on_update_fkey_table | ShareLock diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 6c422f5e1..74d74f1de 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -9,22 +9,22 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port \dt too_long_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------------------------------------+-------+---------- + Schema | Name | Type | Owner 
+--------------------------------------------------------------------- public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres (2 rows) @@ -34,23 +34,23 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; -- Verify that the UDF works and rejects bad arguments. SELECT shard_name(NULL, 666666); - shard_name ------------- - + shard_name +--------------------------------------------------------------------- + (1 row) SELECT shard_name(0, 666666); ERROR: object_name does not reference a valid relation SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, 666666); - shard_name ------------------------------------------------------------------ + shard_name +--------------------------------------------------------------------- too_long_12345678901234567890123456789012345678_e0119164_666666 (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, NULL); - shard_name ------------- - + shard_name +--------------------------------------------------------------------- + (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, -21); @@ -63,9 +63,9 @@ CREATE TABLE name_lengths ( constraint constraint_a UNIQUE (col1) ); SELECT create_distributed_table('name_lengths', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Verify that we CAN add columns with "too-long names", because @@ -82,11 +82,11 @@ ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345 ERROR: cannot create constraint without a name on a distributed table \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; - Column | Type | Modifiers ---------------------------------------------------------------+------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- int_col_12345678901234567890123456789012345678901234567890 | integer | default 1 - float_col_12345678901234567890123456789012345678901234567890 | double precision | - date_col_12345678901234567890123456789012345678901234567890 | date | + float_col_12345678901234567890123456789012345678901234567890 | double precision | + date_col_12345678901234567890123456789012345678901234567890 | date | col2 | integer | not null col1 | integer | not null (5 rows) @@ -102,8 +102,8 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date); \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition ------------------------------------------------------------------+------------------------------------------------------------------------------------------- + Constraint | Definition +--------------------------------------------------------------------- nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= 
'01-01-2014'::date) (1 row) @@ -111,7 +111,7 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_len -- Placeholders for RENAME operations \set VERBOSITY TERSE ALTER TABLE name_lengths RENAME TO name_len_12345678901234567890123456789012345678901234567890; -ERROR: shard name name_len_12345678901234567890123456789012345678_fcd8ab6f_225002 exceeds 63 characters +ERROR: shard name name_len_12345678901234567890123456789012345678_fcd8ab6f_xxxxx exceeds 63 characters ALTER TABLE name_lengths RENAME CONSTRAINT unique_12345678901234567890123456789012345678901234567890 TO unique2_12345678901234567890123456789012345678901234567890; ERROR: renaming constraints belonging to distributed tables is currently unsupported \set VERBOSITY DEFAULT @@ -120,8 +120,8 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_ \c - - - :worker_1_port SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - relname | Column | Type | Definition ------------------------------------------------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 (2 rows) @@ -134,8 +134,8 @@ NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901 \c - - - :worker_1_port SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - relname | Column | Type | Definition ------------------------------------------------------------------+--------+---------+------------ + relname | Column | Type | Definition +--------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_599636aa_225003 | col2 | integer | col2 @@ -145,7 +145,7 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE \c - - - :master_port SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; --- Verify that distributed tables with too-long names +-- Verify that distributed tables with too-long names -- for CHECK constraints are no trouble. 
CREATE TABLE sneaky_name_lengths ( col1 integer not null, @@ -154,9 +154,9 @@ CREATE TABLE sneaky_name_lengths ( CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100) ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE sneaky_name_lengths CASCADE; @@ -167,40 +167,40 @@ CREATE TABLE sneaky_name_lengths ( ); \di public.sneaky_name_lengths* List of relations - Schema | Name | Type | Owner | Table ---------+-----------------------------------------------------------------+-------+----------+--------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition ------------------------------------------------------------+------------------------------------------------------------------------------ + Constraint | Definition +--------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port \di public.sneaky*225006 List of relations - Schema | Name | Type | Owner | Table ---------+-----------------------------------------------------------------+-------+----------+---------------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006 (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition ------------------------------------------------------------+------------------------------------------------------------------------------ + Constraint | Definition +--------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) @@ -216,16 +216,16 @@ CREATE TABLE sneaky_name_lengths ( constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1) ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port \di unique*225008 List of relations - Schema | Name | Type | Owner | 
Table ---------+-----------------------------------------------------------------+-------+----------+---------------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008 (1 row) @@ -239,16 +239,16 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port \dt *225000000000* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | postgres public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres (2 rows) @@ -262,33 +262,33 @@ CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E col1 integer not null PRIMARY KEY, col2 integer not null); SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Verify that quoting is used in shard_name SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; - shard_name ---------------------------------------------------- + shard_name +--------------------------------------------------------------------- "elephant_слонслонслонсло_c8b737c2_2250000000002" (1 row) \c - - - :worker_1_port \dt public.elephant_* List of relations - Schema | Name | Type | Owner ---------+-------------------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | postgres public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres (2 rows) \di public.elephant_* List of relations - Schema | Name | Type | Owner | Table ---------+-------------------------------------------------+-------+----------+------------------------------------------------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- public | elephant_слонслонслонсло_14d34928_2250000000002 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000002 public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003 (2 rows) @@ -302,16 +302,16 @@ CREATE TABLE multi_name_lengths.too_long_123456789012345678901234567890123456789 
col1 integer not null, col2 integer not null); SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = 'multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass; - shard_name ------------------------------------------------------------------------------------- + shard_name +--------------------------------------------------------------------- multi_name_lengths.too_long_1234567890123456789012345678901_e0119164_2250000000004 (1 row) diff --git a/src/test/regress/expected/multi_name_resolution.out b/src/test/regress/expected/multi_name_resolution.out index f4ba9d226..890c336bf 100644 --- a/src/test/regress/expected/multi_name_resolution.out +++ b/src/test/regress/expected/multi_name_resolution.out @@ -8,15 +8,15 @@ SET search_path TO multi_name_resolution; create table namenest1 (id integer primary key, user_id integer); create table namenest2 (id integer primary key, value_2 integer); select * from create_distributed_table('namenest1', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) select * from create_reference_table('namenest2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT r @@ -31,8 +31,8 @@ FROM ( JOIN namenest2 ON (namenest1.user_id = namenest2.value_2) ) AS join_alias(id_deep) WHERE bar.id_deep = join_alias.id_deep; - r ---- + r +--------------------------------------------------------------------- (0 rows) DROP SCHEMA multi_name_resolution CASCADE; diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index bd5e606d3..ea5b81844 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -12,14 +12,14 @@ RESET citus.task_executor_type; SET citus.log_multi_join_order to true; SET citus.enable_repartition_joins to ON; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; - shardminvalue | shardmaxvalue ----------------+--------------- + shardminvalue | shardmaxvalue +--------------------------------------------------------------------- 1 | 5986 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; - shardminvalue | shardmaxvalue ----------------+--------------- + shardminvalue | shardmaxvalue +--------------------------------------------------------------------- 8997 | 14947 (1 row) @@ -33,8 +33,8 @@ DEBUG: Router planner does not support append-partitioned tables. 
CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ -46,14 +46,14 @@ DEBUG: Router planner does not support append-partitioned tables. LOG: join order: [ "lineitem" ][ local partition join "orders" ] DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] - QUERY PLAN --------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) @@ -61,7 +61,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986] -> Hash -> Seq Scan on orders_290002 orders -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) @@ -82,8 +82,8 @@ DEBUG: Router planner does not support append-partitioned tables. CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ -121,8 +121,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 @@ -147,8 +147,8 @@ DEBUG: Router planner does not support append-partitioned tables. 
CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ -186,8 +186,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 @@ -214,8 +214,8 @@ LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement DEBUG: Plan is router executable CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -253,8 +253,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 diff --git a/src/test/regress/expected/multi_orderby_limit_pushdown.out b/src/test/regress/expected/multi_orderby_limit_pushdown.out index 432dd1f2f..860f8501a 100644 --- a/src/test/regress/expected/multi_orderby_limit_pushdown.out +++ b/src/test/regress/expected/multi_orderby_limit_pushdown.out @@ -9,8 +9,8 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 5; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 1 | 3.2857142857142857 4 | 2.7391304347826087 5 | 2.6538461538461538 @@ -23,8 +23,8 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -34,8 +34,8 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit (cost=0.00..0.00 rows=0 width=0) -> Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.avg DESC @@ -43,7 +43,7 @@ LIMIT 1; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit (cost=1.53..1.53 rows=1 width=36) -> Sort (cost=1.53..1.53 rows=2 width=36) Sort Key: (avg(value_1)) DESC @@ -57,8 +57,8 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; - user_id | ?column? ----------+-------------------- + user_id | ?column? 
+--------------------------------------------------------------------- 1 | 4.2857142857142857 (1 row) @@ -67,8 +67,8 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -77,8 +77,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? ----------+--------------------- + user_id | ?column? +--------------------------------------------------------------------- 5 | 65.6538461538461538 (1 row) @@ -86,8 +86,8 @@ SELECT user_id, avg(value_1) + count(value_2) FROM users_table GROUP BY user_id ORDER BY 2 DESC; - user_id | ?column? ----------+--------------------- + user_id | ?column? +--------------------------------------------------------------------- 5 | 28.6538461538461538 4 | 25.7391304347826087 2 | 20.3333333333333333 @@ -101,15 +101,15 @@ SELECT user_id, avg(value_1) + count(value_2) FROM users_table GROUP BY user_id ORDER BY 2 DESC; - QUERY PLAN ---------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan."?column?" DESC -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate (cost=1.58..1.61 rows=2 width=36) Group Key: user_id -> Seq Scan on users_table_1400256 users_table (cost=0.00..1.33 rows=33 width=12) @@ -120,8 +120,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? ----------+--------------------- + user_id | ?column? +--------------------------------------------------------------------- 5 | 28.6538461538461538 (1 row) @@ -130,8 +130,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? ----------+---------- + user_id | ?column? +--------------------------------------------------------------------- 5 | 132 (1 row) @@ -140,8 +140,8 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_2) DESC LIMIT 1; - user_id | ?column? ----------+---------- + user_id | ?column? +--------------------------------------------------------------------- 5 | 132 (1 row) @@ -150,8 +150,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC, 1 DESC LIMIT 2; - user_id | ?column? ----------+---------- + user_id | ?column? +--------------------------------------------------------------------- 2 | 25 6 | 20 (2 rows) @@ -161,8 +161,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC, 1 LIMIT 2; - user_id | ?column? ----------+---------- + user_id | ?column? +--------------------------------------------------------------------- 2 | 100 3 | 100 (2 rows) @@ -172,8 +172,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 5 | 132 4 | 113 (2 rows) @@ -183,8 +183,8 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 2; - user_id | ?column? ----------+---------- + user_id | ?column? 
+--------------------------------------------------------------------- 6 | 238 1 | 232 (2 rows) @@ -194,8 +194,8 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 6 | 42 1 | 43 (2 rows) @@ -205,8 +205,8 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 1 (2 rows) @@ -217,8 +217,8 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - QUERY PLAN ---------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_2 DESC @@ -226,7 +226,7 @@ LIMIT 2; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: ((10000 / sum((value_1 + value_2)))) DESC @@ -239,8 +239,8 @@ SELECT 10000 / (sum(value_1 + value_2)) FROM users_table ORDER BY 1 DESC LIMIT 2; - ?column? ----------- + ?column? +--------------------------------------------------------------------- 19 (1 row) @@ -249,8 +249,8 @@ FROM users_table GROUP BY user_id ORDER BY user_id * avg(value_1) DESC LIMIT 2; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 (2 rows) @@ -260,8 +260,8 @@ FROM users_table GROUP BY user_id ORDER BY user_id * avg(value_1 + value_2) DESC LIMIT 2; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 (2 rows) @@ -271,8 +271,8 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; - user_id ---------- + user_id +--------------------------------------------------------------------- 5 4 (2 rows) @@ -283,8 +283,8 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; - QUERY PLAN ---------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_2 DESC @@ -292,7 +292,7 @@ LIMIT 2; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (sum(value_1)) DESC @@ -307,8 +307,8 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 6 | 2.1000000000000000 2 | 2.7777777777777778 5 | 2.4230769230769231 @@ -323,8 +323,8 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; - QUERY PLAN -------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_3, remote_scan.worker_column_4 @@ -332,7 +332,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: 
One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (max(et."time")), (avg(ut.value_1)) @@ -352,8 +352,8 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY avg(ut.value_2) DESC, AVG(et.value_2) LIMIT 5; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 3 | 1.8947368421052632 1 | 2.4615384615384615 2 | 2.0000000000000000 @@ -367,8 +367,8 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 2; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 1 | 4 6 | 5 (2 rows) @@ -380,8 +380,8 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 5; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.count, remote_scan.worker_column_3, remote_scan.user_id DESC @@ -389,7 +389,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (count(DISTINCT ut.value_2)), (avg(ut.value_1)), ut.user_id DESC diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index f56698c7f..1d65721b7 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -6,8 +6,8 @@ SET citus.next_shard_id TO 770000; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1 ORDER BY 1,2; - l_orderkey | l_linenumber | l_shipdate -------------+--------------+------------ + l_orderkey | l_linenumber | l_shipdate +--------------------------------------------------------------------- 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 1 | 3 | 01-29-1996 @@ -28,23 +28,23 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903 -- trigger the creation of toasted tables and indexes. This in turn prints -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. 
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; - sum | avg --------+-------------------- + sum | avg +--------------------------------------------------------------------- 17999 | 3.0189533713518953 (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); - sum | avg --------+-------------------- + sum | avg +--------------------------------------------------------------------- 30184 | 3.0159872102318145 (1 row) -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; - sum | avg ------+----- - | + sum | avg +--------------------------------------------------------------------- + | (1 row) -- The tests below verify that we can prune shards partitioned over different @@ -58,9 +58,9 @@ CREATE TABLE varchar_partitioned_table varchar_column varchar(100) ); SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Create logical shards and shard placements with shardid 100,101 @@ -86,9 +86,9 @@ CREATE TABLE array_partitioned_table array_column text[] ); SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG2; @@ -122,9 +122,9 @@ CREATE TABLE composite_partitioned_table composite_column composite_type ); SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG2; @@ -150,8 +150,8 @@ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, SET client_min_messages TO ERROR; EXPLAIN (COSTS OFF) SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; - QUERY PLAN -------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 @@ -163,8 +163,8 @@ SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; EXPLAIN (COSTS OFF) SELECT count(*) FROM array_partitioned_table WHERE array_column > '{BA1000U2AMO4ZGX, BZZXSP27F21T6}'; - QUERY PLAN -------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 @@ -176,8 +176,8 @@ SELECT count(*) FROM array_partitioned_table EXPLAIN (COSTS OFF) SELECT count(*) FROM composite_partitioned_table WHERE composite_column < '(b,5,c)'::composite_type; - QUERY PLAN -------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index d0b4a2ec7..26db24c0b 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -29,23 
+29,23 @@ INSERT INTO partitioning_hash_test VALUES (4, 4); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('partitioning_hash_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -53,8 +53,8 @@ SELECT * FROM partitioning_test ORDER BY 1; (4 rows) SELECT * FROM partitioning_hash_test ORDER BY 1; - id | subid -----+------- + id | subid +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -69,8 +69,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2009 partitioning_test_2010 @@ -84,8 +84,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 partitioning_test_2010 | 4 @@ -98,8 +98,8 @@ FROM WHERE logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1') ORDER BY 1; - logicalrelid --------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_hash_test partitioning_hash_test_0 partitioning_hash_test_1 @@ -113,8 +113,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count ---------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_hash_test | 4 partitioning_hash_test_0 | 4 partitioning_hash_test_1 | 4 @@ -130,8 +130,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2011 (2 rows) @@ -144,8 +144,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 (2 rows) @@ -165,8 +165,8 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; - logicalrelid ------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_test partitioning_test_2012 (2 rows) @@ -179,8 +179,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 (2 rows) @@ -190,11 +190,11 @@ ORDER BY INSERT INTO 
partitioning_hash_test VALUES (8, 5); ERROR: no partition of relation "partitioning_hash_test_1660012" found for row DETAIL: Partition key of the failing row contains (subid) = (5). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO partitioning_hash_test VALUES (9, 12); ERROR: no partition of relation "partitioning_hash_test_1660015" found for row DETAIL: Partition key of the failing row contains (subid) = (12). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE partitioning_hash_test_2 (id int, subid int); INSERT INTO partitioning_hash_test_2 VALUES (8, 5); ALTER TABLE partitioning_hash_test ATTACH PARTITION partitioning_hash_test_2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); @@ -202,8 +202,8 @@ NOTICE: Copying data from local table... INSERT INTO partitioning_hash_test VALUES (9, 12); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -213,8 +213,8 @@ SELECT * FROM partitioning_test ORDER BY 1; (6 rows) SELECT * FROM partitioning_hash_test ORDER BY 1; - id | subid -----+------- + id | subid +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -226,9 +226,9 @@ SELECT * FROM partitioning_hash_test ORDER BY 1; -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- load some data @@ -237,8 +237,8 @@ INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -269,9 +269,9 @@ SET citus.shard_replication_factor TO 1; DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); @@ -286,9 +286,9 @@ DETAIL: Relation "partitioning_test_failure_2009" is partitioned table itself a -- multi-level partitioning is not allowed in different order DROP TABLE partitioning_test_failure_2009; SELECT create_distributed_table('partitioning_test_failure', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); @@ -304,8 +304,8 @@ COPY partitioning_test FROM STDIN WITH CSV; COPY partitioning_test_2009 FROM STDIN WITH CSV; -- see the 
data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 9 | 01-01-2009 10 | 01-01-2010 11 | 01-01-2011 @@ -325,8 +325,8 @@ INSERT INTO partitioning_test VALUES(19, '2009-02-02'); INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 15 | 02-01-2009 16 | 02-01-2010 17 | 02-01-2011 @@ -342,8 +342,8 @@ INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 5 | 06-06-2012 5 | 06-06-2012 6 | 07-07-2012 @@ -365,8 +365,8 @@ UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 7 | 07-07-2013 8 | 08-08-2013 (2 rows) @@ -375,7 +375,7 @@ SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; ERROR: no partition of relation "partitioning_test_1660001" found for row DETAIL: Partition key of the failing row contains ("time") = (2020-07-07). 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx -- UPDATE with subqueries on partitioned table UPDATE partitioning_test @@ -392,8 +392,8 @@ WHERE id IN (SELECT id FROM partitioning_test WHERE id = 2); -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-07-2009 2 | 07-07-2010 (2 rows) @@ -405,18 +405,18 @@ DELETE FROM partitioning_test WHERE id = 9; DELETE FROM partitioning_test_2010 WHERE id = 10; -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; - id | time -----+------ + id | time +--------------------------------------------------------------------- (0 rows) -- create default partition CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT; \d+ partitioning_test - Partitioned table "public.partitioning_test" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | | | plain | | - time | date | | | | plain | | + Table "public.partitioning_test" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Partitions: partitioning_test_2009 FOR VALUES FROM ('01-01-2009') TO ('01-01-2010'), partitioning_test_2010 FOR VALUES FROM ('01-01-2010') TO ('01-01-2011'), @@ -429,15 +429,15 @@ INSERT INTO partitioning_test VALUES(21, '2014-02-02'); INSERT INTO partitioning_test VALUES(22, '2015-04-02'); -- see they are inserted into default partition SELECT * FROM partitioning_test WHERE id > 20 ORDER BY 1, 2; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) @@ -445,7 +445,7 @@ SELECT * FROM partitioning_test_default ORDER BY 1, 2; -- create a new partition (will fail) CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); ERROR: updated partition constraint for default partition would be violated by some row -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_default; CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); @@ -455,15 +455,15 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_default DEFAULT END; -- see data is in the table, but some moved out from default partition SELECT * FROM partitioning_test WHERE id > 20 ORDER BY 1, 2; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 22 | 04-02-2015 (1 row) @@ -471,8 +471,8 @@ SELECT * FROM 
partitioning_test_default ORDER BY 1, 2; UPDATE partitioning_test SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-08-2009 2 | 07-08-2010 3 | 09-10-2009 @@ -505,8 +505,8 @@ SELECT * FROM partitioning_test ORDER BY 1; UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 06-09-2009 3 | 09-11-2009 13 | 01-04-2009 @@ -519,7 +519,7 @@ SELECT * FROM partitioning_test_2009 ORDER BY 1; UPDATE partitioning_test_2009 SET time = time + INTERVAL '6 month'; ERROR: new row for relation "partitioning_test_2009_1660005" violates partition constraint DETAIL: Failing row contains (3, 2010-03-11). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx -- -- DDL in distributed partitioned tables -- @@ -533,8 +533,8 @@ CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname; - tablename | indexname ----------------------------+---------------------------------- + tablename | indexname +--------------------------------------------------------------------- partitioning_test_2010 | partitioned_2010_index partitioning_test_2009 | partitioning_2009_index partitioning_test_2009 | partitioning_test_2009_id_idx @@ -564,16 +564,16 @@ FOR VALUES FROM (0) TO (10); CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a); -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname; - tablename | indexname --------------------------------------+------------------------------------------- + tablename | indexname +--------------------------------------------------------------------- non_distributed_partitioned_table_1 | non_distributed_partitioned_table_1_a_idx (1 row) -- drop the index and see it is dropped DROP INDEX non_distributed_partitioned_table_index; SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname; - tablename | indexname ------------+----------- + tablename | indexname +--------------------------------------------------------------------- (0 rows) -- test add COLUMN @@ -584,16 +584,16 @@ ALTER TABLE partitioning_test_2010 ADD new_column_2 int; ERROR: cannot add column to a partition -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; - name | type -------------+--------- + name | type +--------------------------------------------------------------------- id | integer new_column | integer time | date (3 rows) SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; - name | type -------------+--------- + name | type +--------------------------------------------------------------------- id | integer new_column | integer time | date @@ -616,8 +616,8 @@ FROM WHERE table_name = 'partitioning_test_2009' AND constraint_name = 'partitioning_2009_primary'; - table_name | constraint_name | 
constraint_type -------------------------+---------------------------+----------------- + table_name | constraint_name | constraint_type +--------------------------------------------------------------------- partitioning_test_2009 | partitioning_2009_primary | PRIMARY KEY (1 row) @@ -634,8 +634,8 @@ WHERE table_name LIKE 'partitioning_hash_test%' AND constraint_type = 'PRIMARY KEY' ORDER BY 1; - table_name | constraint_name | constraint_type ---------------------------+-------------------------------+----------------- + table_name | constraint_name | constraint_type +--------------------------------------------------------------------- partitioning_hash_test | partitioning_hash_primary | PRIMARY KEY partitioning_hash_test_0 | partitioning_hash_test_0_pkey | PRIMARY KEY partitioning_hash_test_1 | partitioning_hash_test_1_pkey | PRIMARY KEY @@ -654,8 +654,8 @@ INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE; -- see FOREIGN KEY is created SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; - Constraint ---------------------------- + Constraint +--------------------------------------------------------------------- partitioning_2012_foreign (1 row) @@ -663,28 +663,28 @@ SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::reg DELETE FROM partitioning_test_2009 WHERE id = 5; -- see that element is deleted from both partitions SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- test DETACH partition ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see DETACHed partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- delete from default partition DELETE FROM partitioning_test WHERE time >= '2015-01-01'; SELECT * FROM partitioning_test_default; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- create a reference table for foreign key test @@ -692,17 +692,17 @@ CREATE TABLE partitioning_test_reference(id int PRIMARY KEY, subid int); INSERT INTO partitioning_test_reference SELECT a, a FROM generate_series(1, 50) a; SELECT create_reference_table('partitioning_test_reference'); NOTICE: Copying data from local table... 
- create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey FOREIGN KEY (id) REFERENCES partitioning_test_reference(id) ON DELETE CASCADE; CREATE TABLE partitioning_test_foreign_key(id int PRIMARY KEY, value int); SELECT create_distributed_table('partitioning_test_foreign_key', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO partitioning_test_foreign_key SELECT * FROM partitioning_test_reference; @@ -715,8 +715,8 @@ WHERE constraint_type = 'FOREIGN KEY' ORDER BY 1,2; - table_name | constraint_name | constraint_type ---------------------------+--------------------------------+----------------- + table_name | constraint_name | constraint_type +--------------------------------------------------------------------- partitioning_hash_test | partitioning_reference_fk_test | FOREIGN KEY partitioning_hash_test_0 | partitioning_reference_fk_test | FOREIGN KEY partitioning_hash_test_1 | partitioning_reference_fk_test | FOREIGN KEY @@ -744,8 +744,8 @@ SELECT right(table_name, 7)::int as shardid, * FROM ( ) q $$) ) w ORDER BY 1, 2, 3, 4; - shardid | table_name | constraint_name | constraint_type ----------+----------------------------------+----------------------------------------+----------------- + shardid | table_name | constraint_name | constraint_type +--------------------------------------------------------------------- 1660012 | partitioning_hash_test_1660012 | partitioning_reference_fk_test_1660012 | FOREIGN KEY 1660013 | partitioning_hash_test_1660013 | partitioning_reference_fk_test_1660013 | FOREIGN KEY 1660014 | partitioning_hash_test_1660014 | partitioning_reference_fk_test_1660014 | FOREIGN KEY @@ -769,18 +769,18 @@ DROP TYPE foreign_key_details; -- after connection re-establishment SET citus.shard_replication_factor TO 1; SELECT * FROM partitioning_test WHERE id = 11 or id = 12; - id | time | new_column -----+------------+------------ - 11 | 01-02-2011 | - 11 | 01-02-2011 | - 12 | 01-02-2012 | - 12 | 01-02-2012 | + id | time | new_column +--------------------------------------------------------------------- + 11 | 01-02-2011 | + 11 | 01-02-2011 | + 12 | 01-02-2012 | + 12 | 01-02-2012 | (4 rows) DELETE FROM partitioning_test_reference WHERE id = 11 or id = 12; SELECT * FROM partitioning_hash_test ORDER BY 1, 2; - id | subid -----+------- + id | subid +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -792,13 +792,13 @@ SELECT * FROM partitioning_hash_test ORDER BY 1, 2; DELETE FROM partitioning_test_foreign_key WHERE id = 2 OR id = 9; -- see data is deleted from referencing table SELECT * FROM partitioning_test WHERE id = 11 or id = 12; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) SELECT * FROM partitioning_hash_test ORDER BY 1, 2; - id | subid -----+------- + id | subid +--------------------------------------------------------------------- 1 | 2 3 | 7 4 | 4 @@ -813,8 +813,8 @@ BEGIN; ALTER TABLE partitioning_test ADD newer_column int; -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; - name | type ---------------+--------- + name | type 
+--------------------------------------------------------------------- id | integer new_column | integer newer_column | integer @@ -824,8 +824,8 @@ SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass O ROLLBACK; -- see rollback is successful SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; - name | type -------------+--------- + name | type +--------------------------------------------------------------------- id | integer new_column | integer time | date @@ -836,28 +836,28 @@ BEGIN; COPY partitioning_test FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; - id | time | new_column -----+------------+------------ + id | time | new_column +--------------------------------------------------------------------- 22 | 01-01-2010 | 22 (1 row) SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; - id | time | new_column -----+------------+------------ + id | time | new_column +--------------------------------------------------------------------- 23 | 01-01-2011 | 23 (1 row) SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; - id | time | new_column -----+------------+------------ + id | time | new_column +--------------------------------------------------------------------- 24 | 01-01-2013 | 24 (1 row) ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- DML in transaction @@ -866,29 +866,29 @@ BEGIN; INSERT INTO partitioning_test VALUES(25, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; - id | time | new_column -----+------------+------------ - 25 | 02-02-2010 | + id | time | new_column +--------------------------------------------------------------------- + 25 | 02-02-2010 | (1 row) -- INSERT/SELECT in transaction INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; - id | time | new_column -----+------------+------------ - 25 | 02-02-2010 | - 25 | 02-02-2010 | + id | time | new_column +--------------------------------------------------------------------- + 25 | 02-02-2010 | + 25 | 02-02-2010 | (2 rows) -- UPDATE in transaction UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; - id | time | new_column -----+------------+------------ - 25 | 10-10-2010 | - 25 | 10-10-2010 | + id | time | new_column +--------------------------------------------------------------------- + 25 | 10-10-2010 | + 25 | 10-10-2010 | (2 rows) -- perform operations on partition and partioned tables together @@ -898,8 +898,8 @@ COPY partitioning_test FROM STDIN WITH CSV; COPY partitioning_test_2010 FROM STDIN WITH CSV; -- see the data is loaded to shards (we should see 4 rows with same content) SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; - id | time | new_column -----+------------+------------ + id | time | new_column +--------------------------------------------------------------------- 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 @@ -909,8 +909,8 @@ SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test 
WHERE id = 26 ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- DETACH and DROP in a transaction @@ -920,8 +920,8 @@ DROP TABLE partitioning_test_2011; COMMIT; -- see DROPed partitions content is not accessible SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- @@ -932,16 +932,16 @@ SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-0 TRUNCATE partitioning_test_2012; -- see partition is TRUNCATEd SELECT * FROM partitioning_test_2012 ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- test TRUNCATE partitioned table TRUNCATE partitioning_test; -- see partitioned table is TRUNCATEd SELECT * FROM partitioning_test ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- test DROP @@ -950,8 +950,8 @@ INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); DROP TABLE partitioning_test_2010; -- see DROPped partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; - id | time | new_column -----+------+------------ + id | time | new_column +--------------------------------------------------------------------- (0 rows) -- test DROP partitioned table @@ -959,8 +959,8 @@ DROP TABLE partitioning_test; DROP TABLE partitioning_test_reference; -- dropping the parent should CASCADE to the children as well SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; - table_name -------------------------------- + table_name +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_failure partitioning_test_foreign_key @@ -970,15 +970,15 @@ SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitio CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- INSERT/SELECT from regular table to partitioned table @@ -1031,8 +1031,8 @@ FROM ) AS final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 44 2 | 8 @@ -1110,8 +1110,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype 
--------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 367 2 | 360 3 | 57 @@ -1124,9 +1124,9 @@ CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_parti CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2017-12-01', '2017-12-02', '2017-12-03', '2017-12-04', '2017-12-05'); -- test distributing partitioned table colocated with another partitioned table SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- INSERT/SELECT from partitioned table to partitioned table @@ -1181,8 +1181,8 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field -------+----------------------- + cnt | generated_group_field +--------------------------------------------------------------------- 1851 | 1 1077 | 4 963 | 2 @@ -1198,9 +1198,9 @@ count(*) AS cnt, "generated_group_field" CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2); CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); SELECT create_distributed_table('multi_column_partitioning', 'c1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test INSERT to multi-column partitioned table @@ -1210,7 +1210,7 @@ INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); INSERT INTO multi_column_partitioning VALUES(10, 1); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (10, 1). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -- test INSERT to partition with MINVALUE/MAXVALUE bounds @@ -1220,11 +1220,11 @@ INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); INSERT INTO multi_column_partitioning VALUES(20, -20); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20). 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; - c1 | c2 -----+----- + c1 | c2 +--------------------------------------------------------------------- 1 | 1 5 | -5 11 | -11 @@ -1240,21 +1240,21 @@ CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -- distribute partitioned table SELECT create_distributed_table('partitioning_locks', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test locks on router SELECT BEGIN; SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; - id | ref_id | time -----+--------+------ + id | ref_id | time +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+----------------- + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1264,13 +1264,13 @@ COMMIT; -- test locks on real-time SELECT BEGIN; SELECT * FROM partitioning_locks ORDER BY 1, 2; - id | ref_id | time -----+--------+------ + id | ref_id | time +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+----------------- + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1281,13 +1281,13 @@ COMMIT; SET citus.task_executor_type TO 'task-tracker'; BEGIN; SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; - id | ref_id | time | id | ref_id | time -----+--------+------+----+--------+------ + id | ref_id | time | id | ref_id | time +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+----------------- + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1299,8 +1299,8 @@ RESET citus.task_executor_type; BEGIN; INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode 
--------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1314,8 +1314,8 @@ COMMIT; BEGIN; UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1329,8 +1329,8 @@ COMMIT; BEGIN; DELETE FROM partitioning_locks WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1343,16 +1343,16 @@ COMMIT; -- test locks on INSERT/SELECT CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); SELECT create_distributed_table('partitioning_locks_for_select', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1367,8 +1367,8 @@ COMMIT; BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | RowExclusiveLock @@ -1381,8 +1381,8 @@ COMMIT; BEGIN; UPDATE partitioning_locks SET time = '2009-03-01'; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+------------------ + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock 
partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1396,8 +1396,8 @@ COMMIT; BEGIN; ALTER TABLE partitioning_locks ADD COLUMN new_column int; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+--------------------- + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock @@ -1411,8 +1411,8 @@ COMMIT; BEGIN; TRUNCATE partitioning_locks; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; - relation | locktype | mode --------------------------+----------+--------------------- + relation | locktype | mode +--------------------------------------------------------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock @@ -1441,8 +1441,8 @@ WHERE pid = pg_backend_pid() ORDER BY 1, 2, 3; - logicalrelid | locktype | mode --------------------------+----------+-------------------------- + logicalrelid | locktype | mode +--------------------------------------------------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock @@ -1475,8 +1475,8 @@ WHERE pid = pg_backend_pid() ORDER BY 1, 2, 3; - logicalrelid | locktype | mode --------------------------+----------+----------- + logicalrelid | locktype | mode +--------------------------------------------------------------------- partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock @@ -1501,8 +1501,8 @@ WHERE pid = pg_backend_pid() ORDER BY 1, 2, 3; - logicalrelid | locktype | mode --------------------------+----------+-------------------------- + logicalrelid | locktype | mode +--------------------------------------------------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock @@ -1524,62 +1524,62 @@ CREATE TABLE partitioning_hash_join_test_0 PARTITION OF partitioning_hash_join_t CREATE TABLE partitioning_hash_join_test_1 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 1); CREATE TABLE partitioning_hash_join_test_2 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 2); SELECT create_distributed_table('partitioning_hash_join_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT success FROM run_command_on_workers('alter system set enable_mergejoin to off'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) 
SELECT success FROM run_command_on_workers('alter system set enable_indexscan to off'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_indexonlyscan to off'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to off'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Gather Workers Planned: 2 -> Parallel Hash Join @@ -1597,22 +1597,22 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, -- set partition-wise join on and parallel to off SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 0'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to on'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) @@ -1621,13 +1621,13 @@ SET enable_partitionwise_join TO on; ANALYZE partitioning_hash_test, partitioning_hash_join_test; EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Append -> Hash Join Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid)) @@ -1651,13 +1651,13 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, -- partitions EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id); - QUERY PLAN ---------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost 
port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Hash Join Hash Cond: (partitioning_hash_join_test.id = partitioning_hash_test.id) -> Append @@ -1673,50 +1673,50 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id) -- reset partition-wise join SELECT success FROM run_command_on_workers('alter system reset enable_partitionwise_join'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_mergejoin'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_nestloop'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_indexscan'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_indexonlyscan'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset max_parallel_workers_per_gather'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) @@ -1736,9 +1736,9 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT create_distributed_table('partitioning_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE partitioning_test; @@ -1746,9 +1746,9 @@ DROP TABLE partitioning_test; CREATE SCHEMA partitioning_schema; CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partitioning_schema."schema-test_2009"(id int, time date); @@ -1761,8 +1761,8 @@ FROM WHERE logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; - logicalrelid ----------------------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_schema."schema-test" partitioning_schema."schema-test_2009" (2 rows) @@ -1777,8 +1777,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -----------------------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_schema."schema-test" | 4 partitioning_schema."schema-test_2009" | 4 (2 rows) @@ -1787,9 +1787,9 @@ DROP TABLE partitioning_schema."schema-test"; -- make sure we can create partition of a distributed table in a 
schema CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF partitioning_schema."schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); @@ -1801,8 +1801,8 @@ FROM WHERE logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; - logicalrelid ----------------------------------------- + logicalrelid +--------------------------------------------------------------------- partitioning_schema."schema-test" partitioning_schema."schema-test_2009" (2 rows) @@ -1817,8 +1817,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count -----------------------------------------+------- + logicalrelid | count +--------------------------------------------------------------------- partitioning_schema."schema-test" | 4 partitioning_schema."schema-test_2009" | 4 (2 rows) @@ -1828,9 +1828,9 @@ DROP TABLE partitioning_schema."schema-test"; CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); SET search_path = partitioning_schema; SELECT create_distributed_table('"schema-test"', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF "schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); @@ -1842,8 +1842,8 @@ FROM WHERE logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; - logicalrelid --------------------- + logicalrelid +--------------------------------------------------------------------- "schema-test" "schema-test_2009" (2 rows) @@ -1858,8 +1858,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count ---------------------+------- + logicalrelid | count +--------------------------------------------------------------------- "schema-test" | 4 "schema-test_2009" | 4 (2 rows) @@ -1868,16 +1868,16 @@ ORDER BY -- tables with foreign keys CREATE TABLE reference_table(id int PRIMARY KEY); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table_2(id int PRIMARY KEY); SELECT create_reference_table('reference_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); @@ -1887,27 +1887,27 @@ CREATE TABLE partitioning_test_2010 (LIKE partitioning_test); CREATE TABLE partitioning_test_2011 (LIKE partitioning_test); -- distributing partitioning_test will also distribute partitioning_test_2008 SELECT create_distributed_table('partitioning_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('partitioning_test_2009', 'id'); - create_distributed_table --------------------------- - 
+ create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('partitioning_test_2010', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('partitioning_test_2011', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey @@ -1919,8 +1919,8 @@ INSERT INTO partitioning_test_2010 VALUES (1, '2010-02-01'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); ERROR: insert or update on table "partitioning_test_2010_1660191" violates foreign key constraint "partitioning_reference_fkey_1660179" -DETAIL: Key (id)=(1) is not present in table "reference_table_1660177". -CONTEXT: while executing command on localhost:57637 +DETAIL: Key (id)=(X) is not present in table "reference_table_1660177". +CONTEXT: while executing command on localhost:xxxxx -- Truncate, so attaching again won't fail TRUNCATE partitioning_test_2010; -- Attach a table which already has the same constraint diff --git a/src/test/regress/expected/multi_partitioning_utils.out b/src/test/regress/expected/multi_partitioning_utils.out index 9ef8f9016..68fa39791 100644 --- a/src/test/regress/expected/multi_partitioning_utils.out +++ b/src/test/regress/expected/multi_partitioning_utils.out @@ -74,23 +74,23 @@ $function$; CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('date_partitioned_table'); - generate_partition_information --------------------------------- + generate_partition_information +--------------------------------------------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); - drop_and_recreate_partitioned_table -------------------------------------- - + drop_and_recreate_partitioned_table +--------------------------------------------------------------------- + (1 row) -- we should also be able to see the PARTITION BY ... 
for the parent table SELECT master_get_table_ddl_events('date_partitioned_table'); - master_get_table_ddl_events ---------------------------------------------------------------------------------------------------- - CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") ALTER TABLE public.date_partitioned_table OWNER TO postgres (2 rows) @@ -99,48 +99,48 @@ CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01'); -- we should be able to get the partitioning information after the partitions are created SELECT generate_partition_information('date_partitioned_table'); - generate_partition_information --------------------------------- + generate_partition_information +--------------------------------------------------------------------- RANGE ("time") (1 row) -- lets get the attach partition commands SELECT generate_alter_table_attach_partition_command('date_partition_2006'); - generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------ + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) SELECT generate_alter_table_attach_partition_command('date_partition_2007'); - generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------ + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008'); (1 row) -- detach and attach the partition by the command generated by us \d+ date_partitioned_table Table "public.date_partitioned_table" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | | | plain | | - time | date | | | | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'), date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008') SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); - detach_and_attach_partition ------------------------------ - + detach_and_attach_partition +--------------------------------------------------------------------- + (1 row) -- check that both partitions are visiable \d+ date_partitioned_table Table "public.date_partitioned_table" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | | | plain | | - time | date | | | | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'), date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008') @@ -150,52 +150,52 @@ Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time); CREATE TABLE date_partition_2007_100 (id int, time date ); -- now create the partitioning hierarcy -SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', - referenced_shard:=100, referenced_schema_name:='public', +SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', + referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); - worker_apply_inter_shard_ddl_command --------------------------------------- - + worker_apply_inter_shard_ddl_command +--------------------------------------------------------------------- + (1 row) -- the hierarcy is successfully created \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | | | plain | | - time | date | | | | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2007_100 FOR VALUES FROM ('01-01-2007') TO ('01-02-2008') -- Citus can also get the DDL events for the partitions as regular tables SELECT master_get_table_ddl_events('date_partition_2007_100'); - master_get_table_ddl_events ------------------------------------------------------------------------ + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) ALTER TABLE public.date_partition_2007_100 OWNER TO postgres (2 rows) -- now break the partitioning hierarcy -SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', - referenced_shard:=100, referenced_schema_name:='public', +SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', + referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); - worker_apply_inter_shard_ddl_command --------------------------------------- - + worker_apply_inter_shard_ddl_command +--------------------------------------------------------------------- + (1 row) -- the hierarcy is successfully broken \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | | | plain | | - time | date | | | | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Number of partitions: 0 --- now lets have some more complex partitioning hierarcies with +-- now lets have some more complex partitioning hierarcies with -- tables on different schemas and constraints on the tables CREATE SCHEMA partition_parent_schema; CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time); @@ -205,38 +205,38 @@ CREATE SCHEMA partition_child_2_schema; CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('partition_parent_schema.parent_table'); - generate_partition_information --------------------------------- + generate_partition_information +--------------------------------------------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); - drop_and_recreate_partitioned_table -------------------------------------- - + drop_and_recreate_partitioned_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02'); SET search_path = 'partition_parent_schema'; ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); SELECT public.generate_partition_information('parent_table'); - generate_partition_information --------------------------------- + generate_partition_information +--------------------------------------------------------------------- RANGE ("time") (1 row) -- lets get the attach partition commands SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); - generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------- + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'); (1 row) SET search_path = 'partition_child_2_schema'; SELECT public.generate_alter_table_attach_partition_command('child_2'); - generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------- + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) @@ -244,27 +244,27 @@ SET search_path = 'partition_parent_schema'; -- detach and attach the 
partition by the command generated by us \d+ parent_table Table "partition_parent_schema.parent_table" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | not null | | plain | | - time | date | | | now() | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | not null | | plain | | + time | date | | | now() | plain | | Partition key: RANGE ("time") Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'), partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); - detach_and_attach_partition ------------------------------ - + detach_and_attach_partition +--------------------------------------------------------------------- + (1 row) -- check that both partitions are visiable \d+ parent_table Table "partition_parent_schema.parent_table" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - id | integer | | not null | | plain | | - time | date | | | now() | plain | | + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------------------------------------------------------------------- + id | integer | | not null | | plain | | + time | date | | | now() | plain | | Partition key: RANGE ("time") Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'), partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') @@ -278,8 +278,8 @@ SELECT public.print_partitions('partition_child_1_schema.child_1'); ERROR: "child_1" is not a parent table -- now pring the partitions SELECT public.print_partitions('parent_table'); - print_partitions ------------------- + print_partitions +--------------------------------------------------------------------- child_1,child_2 (1 row) @@ -309,70 +309,70 @@ CREATE TABLE multi_column_partition_2( ); -- partitioning information SELECT generate_partition_information('multi_column_partitioned'); - generate_partition_information ------------------------------------------------------ + generate_partition_information +--------------------------------------------------------------------- RANGE (a, (((a + b) + 1)), some_function(upper(c))) (1 row) SELECT master_get_table_ddl_events('multi_column_partitioned'); - master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) ALTER TABLE public.multi_column_partitioned OWNER TO postgres (2 rows) SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); - drop_and_recreate_partitioned_table -------------------------------------- - + 
drop_and_recreate_partitioned_table +--------------------------------------------------------------------- + (1 row) -- partitions and their ranges ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); - generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------- + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); (1 row) ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); - generate_alter_table_attach_partition_command --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); (1 row) SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); - generate_alter_table_detach_partition_command ---------------------------------------------------------------------------------------------------------- + generate_alter_table_detach_partition_command +--------------------------------------------------------------------- ALTER TABLE IF EXISTS public.multi_column_partitioned DETACH PARTITION public.multi_column_partition_2; (1 row) -- finally a test with LIST partitioning CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ; SELECT generate_partition_information('list_partitioned'); - generate_partition_information --------------------------------- + generate_partition_information +--------------------------------------------------------------------- LIST (col1) (1 row) SELECT master_get_table_ddl_events('list_partitioned'); - master_get_table_ddl_events -------------------------------------------------------------------------------------------------------------------------- - CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) ALTER TABLE public.list_partitioned OWNER TO postgres (2 rows) SELECT drop_and_recreate_partitioned_table('list_partitioned'); - drop_and_recreate_partitioned_table -------------------------------------- - + drop_and_recreate_partitioned_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); - generate_alter_table_attach_partition_command 
------------------------------------------------------------------------------------------------------------------------------------ + generate_alter_table_attach_partition_command +--------------------------------------------------------------------- ALTER TABLE public.list_partitioned ATTACH PARTITION public.list_partitioned_1 FOR VALUES IN ('100', '101', '102', '103', '104'); (1 row) @@ -387,31 +387,31 @@ CREATE TABLE capitals ( ) INHERITS (cities); -- returns true since capitals inherits from cities SELECT table_inherits('capitals'); - table_inherits ----------------- + table_inherits +--------------------------------------------------------------------- t (1 row) --- although date_partition_2006 inherits from its parent +-- although date_partition_2006 inherits from its parent -- returns false since the hierarcy is formed via partitioning SELECT table_inherits('date_partition_2006'); - table_inherits ----------------- + table_inherits +--------------------------------------------------------------------- f (1 row) -- returns true since cities inherited by capitals SELECT table_inherited('cities'); - table_inherited ------------------ + table_inherited +--------------------------------------------------------------------- t (1 row) --- although date_partitioned_table inherited by its partitions +-- although date_partitioned_table inherited by its partitions -- returns false since the hierarcy is formed via partitioning SELECT table_inherited('date_partitioned_table'); - table_inherited ------------------ + table_inherited +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 0e168bbdc..b8e8608dc 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -5,8 +5,8 @@ -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. 
+-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$ DECLARE @@ -141,112 +141,112 @@ SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); - plpgsql_test_1 ----------------- + plpgsql_test_1 +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_2(); - plpgsql_test_2 ----------------- + plpgsql_test_2 +--------------------------------------------------------------------- 12000 (1 row) SELECT plpgsql_test_3(); - plpgsql_test_3 ----------------- + plpgsql_test_3 +--------------------------------------------------------------------- 1956 (1 row) SELECT plpgsql_test_4(); - plpgsql_test_4 ----------------- + plpgsql_test_4 +--------------------------------------------------------------------- 7806 (1 row) SELECT plpgsql_test_5(); - plpgsql_test_5 ----------------- + plpgsql_test_5 +--------------------------------------------------------------------- 39 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_6(1555); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 10185 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); - plpgsql_test_7 ----------------------------------------- + plpgsql_test_7 +--------------------------------------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); - plpgsql_test_7 ---------------------------------- + plpgsql_test_7 +--------------------------------------------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) -- now, PL/pgsql functions with random order SELECT plpgsql_test_6(155); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_3(); - plpgsql_test_3 ----------------- + plpgsql_test_3 +--------------------------------------------------------------------- 1956 (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); - plpgsql_test_7 ---------------------------------- + plpgsql_test_7 +--------------------------------------------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) SELECT plpgsql_test_5(); - plpgsql_test_5 ----------------- + plpgsql_test_5 +--------------------------------------------------------------------- 39 (1 row) SELECT plpgsql_test_1(); - plpgsql_test_1 ----------------- + plpgsql_test_1 +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_6(1555); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 10185 (1 row) SELECT plpgsql_test_4(); - plpgsql_test_4 ----------------- + plpgsql_test_4 +--------------------------------------------------------------------- 7806 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); - plpgsql_test_7 ----------------------------------------- + plpgsql_test_7 +--------------------------------------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_2(); - plpgsql_test_2 ----------------- + plpgsql_test_2 
+--------------------------------------------------------------------- 12000 (1 row) @@ -255,27 +255,27 @@ SELECT plpgsql_test_2(); RESET citus.task_executor_type; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); - plpgsql_test_1 ----------------- + plpgsql_test_1 +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_2(); - plpgsql_test_2 ----------------- + plpgsql_test_2 +--------------------------------------------------------------------- 12000 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_6(1555); - plpgsql_test_6 ----------------- + plpgsql_test_6 +--------------------------------------------------------------------- 10185 (1 row) @@ -286,9 +286,9 @@ CREATE TABLE plpgsql_table ( ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('plpgsql_table','key','hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION no_parameter_insert() RETURNS void as $$ @@ -298,39 +298,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert(); - no_parameter_insert ---------------------- - + no_parameter_insert +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION single_parameter_insert(key_arg int) @@ -341,39 +341,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT single_parameter_insert(1); - single_parameter_insert -------------------------- - + single_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT single_parameter_insert(2); - single_parameter_insert -------------------------- - + single_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT single_parameter_insert(3); - single_parameter_insert -------------------------- - + single_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT single_parameter_insert(4); - single_parameter_insert -------------------------- - + single_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT single_parameter_insert(5); - single_parameter_insert -------------------------- - + single_parameter_insert 
+--------------------------------------------------------------------- + (1 row) SELECT single_parameter_insert(6); - single_parameter_insert -------------------------- - + single_parameter_insert +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION double_parameter_insert(key_arg int, value_arg int) @@ -384,39 +384,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT double_parameter_insert(1, 10); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT double_parameter_insert(2, 20); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT double_parameter_insert(3, 30); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT double_parameter_insert(4, 40); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT double_parameter_insert(5, 50); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT double_parameter_insert(6, 60); - double_parameter_insert -------------------------- - + double_parameter_insert +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION non_partition_parameter_insert(value_arg int) @@ -427,69 +427,69 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_insert(10); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert(20); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert(30); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert(40); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert(50); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert(60); - non_partition_parameter_insert --------------------------------- - + non_partition_parameter_insert +--------------------------------------------------------------------- + (1 row) -- check inserted values SELECT * FROM plpgsql_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 10 - 1 | + 1 | 2 | 20 - 2 | + 2 | 3 | 30 - 3 | + 3 | 4 | 40 - 4 | + 4 | 
5 | 50 - 5 | + 5 | 6 | 60 - 6 | + 6 | (24 rows) -- check router executor select @@ -512,43 +512,43 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_partition_column_select(1); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (1,10) (1,) (2 rows) SELECT router_partition_column_select(2); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (2,20) (2,) (2 rows) SELECT router_partition_column_select(3); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (3,30) (3,) (2 rows) SELECT router_partition_column_select(4); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (4,40) (4,) (2 rows) SELECT router_partition_column_select(5); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (5,50) (5,) (2 rows) SELECT router_partition_column_select(6); - router_partition_column_select --------------------------------- + router_partition_column_select +--------------------------------------------------------------------- (6,60) (6,) (2 rows) @@ -573,38 +573,38 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_non_partition_column_select(10); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,10) (1 row) SELECT router_non_partition_column_select(20); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,20) (1 row) SELECT router_non_partition_column_select(30); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,30) (1 row) SELECT router_non_partition_column_select(40); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,40) (1 row) SELECT router_non_partition_column_select(50); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,50) (1 row) SELECT router_non_partition_column_select(60); - router_non_partition_column_select ------------------------------------- + router_non_partition_column_select +--------------------------------------------------------------------- (0,60) (1 row) @@ -628,43 +628,43 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_non_partition_column_select(10); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (2 rows) SELECT 
real_time_non_partition_column_select(20); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,20) (2,20) (2 rows) SELECT real_time_non_partition_column_select(30); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,30) (3,30) (2 rows) SELECT real_time_non_partition_column_select(40); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,40) (4,40) (2 rows) SELECT real_time_non_partition_column_select(50); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,60) (6,60) (2 rows) @@ -689,16 +689,16 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_partition_column_select(1); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (1,) (3 rows) SELECT real_time_partition_column_select(2); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (2,20) @@ -706,8 +706,8 @@ SELECT real_time_partition_column_select(2); (4 rows) SELECT real_time_partition_column_select(3); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (3,30) @@ -715,8 +715,8 @@ SELECT real_time_partition_column_select(3); (4 rows) SELECT real_time_partition_column_select(4); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (4,40) @@ -724,8 +724,8 @@ SELECT real_time_partition_column_select(4); (4 rows) SELECT real_time_partition_column_select(5); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (5,50) @@ -733,8 +733,8 @@ SELECT real_time_partition_column_select(5); (4 rows) SELECT real_time_partition_column_select(6); - real_time_partition_column_select ------------------------------------ + real_time_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (6,60) @@ -762,43 +762,43 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_non_partition_column_select(10); - task_tracker_non_partition_column_select ------------------------------------------- + task_tracker_non_partition_column_select 
+--------------------------------------------------------------------- (0,10) (1,10) (2 rows) SELECT task_tracker_non_partition_column_select(20); - task_tracker_non_partition_column_select ------------------------------------------- + task_tracker_non_partition_column_select +--------------------------------------------------------------------- (0,20) (2,20) (2 rows) SELECT task_tracker_non_partition_column_select(30); - task_tracker_non_partition_column_select ------------------------------------------- + task_tracker_non_partition_column_select +--------------------------------------------------------------------- (0,30) (3,30) (2 rows) SELECT task_tracker_non_partition_column_select(40); - task_tracker_non_partition_column_select ------------------------------------------- + task_tracker_non_partition_column_select +--------------------------------------------------------------------- (0,40) (4,40) (2 rows) SELECT task_tracker_non_partition_column_select(50); - task_tracker_non_partition_column_select ------------------------------------------- + task_tracker_non_partition_column_select +--------------------------------------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); - real_time_non_partition_column_select ---------------------------------------- + real_time_non_partition_column_select +--------------------------------------------------------------------- (0,60) (6,60) (2 rows) @@ -823,16 +823,16 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_partition_column_select(1); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (1,) (3 rows) SELECT task_tracker_partition_column_select(2); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (2,20) @@ -840,8 +840,8 @@ SELECT task_tracker_partition_column_select(2); (4 rows) SELECT task_tracker_partition_column_select(3); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (3,30) @@ -849,8 +849,8 @@ SELECT task_tracker_partition_column_select(3); (4 rows) SELECT task_tracker_partition_column_select(4); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (4,40) @@ -858,8 +858,8 @@ SELECT task_tracker_partition_column_select(4); (4 rows) SELECT task_tracker_partition_column_select(5); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (5,50) @@ -867,8 +867,8 @@ SELECT task_tracker_partition_column_select(5); (4 rows) SELECT task_tracker_partition_column_select(6); - task_tracker_partition_column_select --------------------------------------- + task_tracker_partition_column_select +--------------------------------------------------------------------- (0,10) (1,10) (6,60) @@ -884,39 +884,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT 
partition_parameter_update(1, 11); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_update(2, 21); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_update(3, 31); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_update(4, 41); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_update(5, 51); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_update(6, 61); - partition_parameter_update ----------------------------- - + partition_parameter_update +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION non_partition_parameter_update(int, int) RETURNS void as $$ @@ -926,57 +926,57 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_update(10, 12); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update(20, 22); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update(30, 32); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update(40, 42); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update(50, 52); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update(60, 62); - non_partition_parameter_update --------------------------------- - + non_partition_parameter_update +--------------------------------------------------------------------- + (1 row) -- check table after updates SELECT * FROM plpgsql_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 11 1 | 11 2 | 21 @@ -999,39 +999,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_delete(1, 11); - partition_parameter_delete ----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_delete(2, 21); - partition_parameter_delete 
----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_delete(3, 31); - partition_parameter_delete ----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_delete(4, 41); - partition_parameter_delete ----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_delete(5, 51); - partition_parameter_delete ----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT partition_parameter_delete(6, 61); - partition_parameter_delete ----------------------------- - + partition_parameter_delete +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION non_partition_parameter_delete(int) RETURNS void as $$ @@ -1041,59 +1041,59 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete(12); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete(22); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete(32); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete(42); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete(52); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete(62); - non_partition_parameter_delete --------------------------------- - + non_partition_parameter_delete +--------------------------------------------------------------------- + (1 row) -- check table after deletes SELECT * FROM plpgsql_table ORDER BY key, value; - key | value ------+------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + key | value +--------------------------------------------------------------------- + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- check whether we can handle execute parameters CREATE TABLE execute_parameter_test (key int, val date); SELECT create_distributed_table('execute_parameter_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DO $$ @@ -1112,9 +1112,9 @@ CREATE TABLE func_parameter_test ( PRIMARY KEY (key, seq) ); SELECT create_distributed_table('func_parameter_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION insert_with_max(pkey text) RETURNS VOID AS @@ -1135,44 +1135,44 @@ $BODY$ 
$BODY$ LANGUAGE plpgsql; SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT insert_with_max('key'); - insert_with_max ------------------ - + insert_with_max +--------------------------------------------------------------------- + (1 row) SELECT key, seq FROM func_parameter_test ORDER BY seq; - key | seq ------+----- + key | seq +--------------------------------------------------------------------- key | 1 key | 2 key | 3 @@ -1187,9 +1187,9 @@ DROP TABLE func_parameter_test; SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION ddl_in_plpgsql() @@ -1201,15 +1201,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT ddl_in_plpgsql(); - ddl_in_plpgsql ----------------- - + ddl_in_plpgsql +--------------------------------------------------------------------- + (1 row) SELECT ddl_in_plpgsql(); - ddl_in_plpgsql ----------------- - + ddl_in_plpgsql +--------------------------------------------------------------------- + (1 row) -- test prepared ddl with multi search path to make sure the schema name doesn't leak on @@ -1224,9 +1224,9 @@ $BODY$ LANGUAGE plpgsql; CREATE SCHEMA otherschema; SET search_path TO otherschema, public; SELECT ddl_in_plpgsql(); - ddl_in_plpgsql ----------------- - + ddl_in_plpgsql +--------------------------------------------------------------------- + (1 row) DROP INDEX prepared_index; @@ -1234,21 +1234,21 @@ DROP INDEX prepared_index; -- created on this table, but instead on the table in the public schema CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT ddl_in_plpgsql(); - ddl_in_plpgsql ----------------- - + ddl_in_plpgsql +--------------------------------------------------------------------- + (1 row) -- verify the index is created in the correct schema SELECT schemaname, indexrelname FROM pg_stat_all_indexes WHERE indexrelname = 'prepared_index'; - schemaname | indexrelname --------------+---------------- + schemaname | indexrelname +--------------------------------------------------------------------- otherschema | prepared_index (1 row) @@ -1264,15 +1264,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT copy_in_plpgsql(); - copy_in_plpgsql ------------------ - + copy_in_plpgsql +--------------------------------------------------------------------- + (1 row) SELECT copy_in_plpgsql(); - 
copy_in_plpgsql ------------------ - + copy_in_plpgsql +--------------------------------------------------------------------- + (1 row) -- test prepared COPY on a non-distributed table @@ -1285,15 +1285,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT local_copy_in_plpgsql(); - local_copy_in_plpgsql ------------------------ - + local_copy_in_plpgsql +--------------------------------------------------------------------- + (1 row) SELECT local_copy_in_plpgsql(); - local_copy_in_plpgsql ------------------------ - + local_copy_in_plpgsql +--------------------------------------------------------------------- + (1 row) -- types statements should not crash nor leak schema specifications on to cached statements @@ -1309,23 +1309,23 @@ BEGIN END; $function$; SELECT type_ddl_plpgsql(); - type_ddl_plpgsql ------------------- - + type_ddl_plpgsql +--------------------------------------------------------------------- + (1 row) -- create same type in new schema, owner of this new type should change CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); - type_ddl_plpgsql ------------------- - + type_ddl_plpgsql +--------------------------------------------------------------------- + (1 row) -- find all renamed types to verify the schema name didn't leak, nor a crash happened SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; - nspname | typname --------------+------------------------- + nspname | typname +--------------------------------------------------------------------- public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup (2 rows) diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 3ded28e5a..8338ee3f9 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -2,8 +2,8 @@ -- MULTI_PREPARE_SQL -- -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; -- Tests covering PREPARE statements. 
Many of the queries are -- taken from other regression test files and converted into @@ -103,120 +103,120 @@ SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- execute prepared statements EXECUTE prepared_test_1; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) EXECUTE prepared_test_3; - count -------- + count +--------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_4; - count -------- + count +--------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_5; - count -------- + count +--------------------------------------------------------------------- 39 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); - count -------- + count +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); - count -------- + count +--------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- now, execute prepared statements with random order EXECUTE prepared_test_6(155); - count -------- + count +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_3; - count -------- + count +--------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) EXECUTE prepared_test_5; - count -------- + count +--------------------------------------------------------------------- 39 (1 row) EXECUTE prepared_test_1; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_6(1555); - count -------- + count +--------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_4; - count -------- + count +--------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_2; - count -------- + count 
+--------------------------------------------------------------------- 12000 (1 row) -- CREATE TABLE ... AS EXECUTE prepared_statement tests CREATE TEMP TABLE prepared_sql_test_7 AS EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); SELECT * from prepared_sql_test_7; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) @@ -224,27 +224,27 @@ SELECT * from prepared_sql_test_7; RESET citus.task_executor_type; -- execute prepared statements EXECUTE prepared_test_1; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); - count -------- + count +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); - count -------- + count +--------------------------------------------------------------------- 10185 (1 row) @@ -261,9 +261,9 @@ CREATE TABLE router_executor_table ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('router_executor_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test parameterized inserts @@ -276,8 +276,8 @@ EXECUTE prepared_insert('comment-4', '(4, 40)'); EXECUTE prepared_insert('comment-5', '(5, 50)'); EXECUTE prepared_insert('comment-6', '(6, 60)'); SELECT * FROM router_executor_table ORDER BY comment; - id | comment | stats -----+-----------+-------- + id | comment | stats +--------------------------------------------------------------------- 1 | comment-1 | (1,10) 1 | comment-2 | (2,20) 1 | comment-3 | (3,30) @@ -291,38 +291,38 @@ PREPARE prepared_select(integer, integer) AS SELECT count(*) FROM router_executor_table WHERE id = 1 AND stats = ROW($1, $2)::test_composite_type; EXECUTE prepared_select(1, 10); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(2, 20); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(3, 30); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(4, 40); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(5, 50); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(6, 60); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -348,9 +348,9 @@ CREATE TABLE prepare_table ( SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('prepare_table','key','hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) PREPARE prepared_no_parameter_insert AS @@ -400,32 +400,32 @@ EXECUTE prepared_non_partition_parameter_insert(50); EXECUTE 
prepared_non_partition_parameter_insert(60); -- check inserted values SELECT * FROM prepare_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 10 - 1 | + 1 | 2 | 20 - 2 | + 2 | 3 | 30 - 3 | + 3 | 4 | 40 - 4 | + 4 | 5 | 50 - 5 | + 5 | 6 | 60 - 6 | + 6 | 7 | 70 8 | 80 9 | 90 @@ -454,45 +454,45 @@ PREPARE prepared_router_partition_column_select(int) AS key, value; EXECUTE prepared_router_partition_column_select(1); - key | value ------+------- + key | value +--------------------------------------------------------------------- 1 | 10 - 1 | + 1 | (2 rows) EXECUTE prepared_router_partition_column_select(2); - key | value ------+------- + key | value +--------------------------------------------------------------------- 2 | 20 - 2 | + 2 | (2 rows) EXECUTE prepared_router_partition_column_select(3); - key | value ------+------- + key | value +--------------------------------------------------------------------- 3 | 30 - 3 | + 3 | (2 rows) EXECUTE prepared_router_partition_column_select(4); - key | value ------+------- + key | value +--------------------------------------------------------------------- 4 | 40 - 4 | + 4 | (2 rows) EXECUTE prepared_router_partition_column_select(5); - key | value ------+------- + key | value +--------------------------------------------------------------------- 5 | 50 - 5 | + 5 | (2 rows) EXECUTE prepared_router_partition_column_select(6); - key | value ------+------- + key | value +--------------------------------------------------------------------- 6 | 60 - 6 | + 6 | (2 rows) PREPARE prepared_router_non_partition_column_select(int) AS @@ -508,38 +508,38 @@ PREPARE prepared_router_non_partition_column_select(int) AS key, value; EXECUTE prepared_router_non_partition_column_select(10); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 (1 row) EXECUTE prepared_router_non_partition_column_select(20); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 20 (1 row) EXECUTE prepared_router_non_partition_column_select(30); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 30 (1 row) EXECUTE prepared_router_non_partition_column_select(40); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 40 (1 row) EXECUTE prepared_router_non_partition_column_select(50); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 50 (1 row) EXECUTE prepared_router_non_partition_column_select(60); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 60 (1 row) @@ -556,43 +556,43 @@ PREPARE prepared_real_time_non_partition_column_select(int) AS key, value; EXECUTE prepared_real_time_non_partition_column_select(10); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(20); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE 
prepared_real_time_non_partition_column_select(30); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(40); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(50); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(60); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 60 6 | 60 (2 rows) @@ -610,56 +610,56 @@ PREPARE prepared_real_time_partition_column_select(int) AS key, value; EXECUTE prepared_real_time_partition_column_select(1); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 - 1 | + 1 | (3 rows) EXECUTE prepared_real_time_partition_column_select(2); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 - 2 | + 2 | (4 rows) EXECUTE prepared_real_time_partition_column_select(3); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 - 3 | + 3 | (4 rows) EXECUTE prepared_real_time_partition_column_select(4); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 - 4 | + 4 | (4 rows) EXECUTE prepared_real_time_partition_column_select(5); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 - 5 | + 5 | (4 rows) EXECUTE prepared_real_time_partition_column_select(6); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 - 6 | + 6 | (4 rows) -- check task-tracker executor @@ -676,43 +676,43 @@ PREPARE prepared_task_tracker_non_partition_column_select(int) AS key, value; EXECUTE prepared_task_tracker_non_partition_column_select(10); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(20); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(30); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(40); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(50); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(60); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 60 6 | 60 (2 rows) @@ -730,56 +730,56 @@ PREPARE prepared_task_tracker_partition_column_select(int) AS key, 
value; EXECUTE prepared_task_tracker_partition_column_select(1); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 - 1 | + 1 | (3 rows) EXECUTE prepared_task_tracker_partition_column_select(2); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 - 2 | + 2 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(3); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 - 3 | + 3 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(4); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 - 4 | + 4 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(5); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 - 5 | + 5 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(6); - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 - 6 | + 6 | (4 rows) RESET citus.task_executor_type; @@ -804,20 +804,20 @@ EXECUTE prepared_non_partition_parameter_update(50, 52); EXECUTE prepared_non_partition_parameter_update(60, 62); -- check after updates SELECT * FROM prepare_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 11 1 | 11 2 | 21 @@ -852,14 +852,14 @@ EXECUTE prepared_non_partition_parameter_delete(52); EXECUTE prepared_non_partition_parameter_delete(62); -- check after deletes SELECT * FROM prepare_table ORDER BY key, value; - key | value ------+------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + key | value +--------------------------------------------------------------------- + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- Testing parameters + function evaluation @@ -870,9 +870,9 @@ CREATE TABLE prepare_func_table ( value3 timestamptz DEFAULT now() ); SELECT create_distributed_table('prepare_func_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test function evaluation with parameters in an expression @@ -886,8 +886,8 @@ EXECUTE prepared_function_evaluation_insert(4); EXECUTE prepared_function_evaluation_insert(5); EXECUTE prepared_function_evaluation_insert(6); SELECT key, value1 FROM prepare_func_table ORDER BY key; - key | value1 ------+-------- + key | value1 +--------------------------------------------------------------------- 2 | 0 3 | 0 4 | 0 @@ -907,8 +907,8 @@ EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); SELECT key, value2 FROM prepare_func_table; - key | value2 ------+-------- + key | value2 +--------------------------------------------------------------------- key | value key | value key | value @@ -924,9 +924,9 @@ CREATE TABLE text_partition_column_table ( value int ); SELECT create_distributed_table('text_partition_column_table', 'key'); - create_distributed_table --------------------------- - + 
create_distributed_table +--------------------------------------------------------------------- + (1 row) PREPARE prepared_relabel_insert(varchar) AS @@ -938,8 +938,8 @@ EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); SELECT key, value FROM text_partition_column_table ORDER BY key; - key | value -------+------- + key | value +--------------------------------------------------------------------- test | 1 test | 1 test | 1 @@ -954,8 +954,8 @@ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$'); SELECT run_command_on_workers($$ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$') $$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE DOMAIN") (localhost,57638,t,"CREATE DOMAIN") (2 rows) @@ -965,9 +965,9 @@ CREATE TABLE domain_partition_column_table ( value int ); SELECT create_distributed_table('domain_partition_column_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) PREPARE prepared_coercion_to_domain_insert(text) AS @@ -979,8 +979,8 @@ EXECUTE prepared_coercion_to_domain_insert('test-4'); EXECUTE prepared_coercion_to_domain_insert('test-5'); EXECUTE prepared_coercion_to_domain_insert('test-6'); SELECT key, value FROM domain_partition_column_table ORDER BY key; - key | value ---------+------- + key | value +--------------------------------------------------------------------- test-1 | 1 test-2 | 1 test-3 | 1 @@ -1001,9 +1001,9 @@ CREATE TABLE http_request ( response_time_msec INT ); SELECT create_distributed_table('http_request', 'site_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) PREPARE FOO AS INSERT INTO http_request ( @@ -1020,8 +1020,8 @@ EXECUTE foo; EXECUTE foo; EXECUTE foo; SELECT count(distinct ingest_time) FROM http_request WHERE site_id = 1; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -1044,9 +1044,9 @@ CREATE TABLE test_table (test_id integer NOT NULL, data text); SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_table', 'test_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- avoid 9.6+ only context messages @@ -1054,21 +1054,21 @@ SELECT create_distributed_table('test_table', 'test_id', 'hash'); --plain statement, needs planning SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); NOTICE: replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) --prepared statement PREPARE countsome AS SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); EXECUTE countsome; -- should indicate planning NOTICE: replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) -- invalidate half of the placements using SQL, should invalidate via trigger @@ -1078,13 +1078,13 @@ 
WHERE shardid IN ( AND nodeport = :worker_1_port; EXECUTE countsome; -- should indicate replanning NOTICE: replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) -- repair shards, should invalidate via master_metadata_utility.c @@ -1093,21 +1093,21 @@ FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; - master_copy_shard_placement ------------------------------ - - + master_copy_shard_placement +--------------------------------------------------------------------- + + (2 rows) EXECUTE countsome; -- should indicate replanning NOTICE: replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count -------- + count +--------------------------------------------------------------------- (0 rows) -- reset diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out index a2a34a582..02d1a72e9 100644 --- a/src/test/regress/expected/multi_prune_shard_list.out +++ b/src/test/regress/expected/multi_prune_shard_list.out @@ -35,64 +35,64 @@ CREATE FUNCTION print_sorted_shard_intervals(regclass) CREATE TABLE pruning ( species text, last_pruned date, plant_id integer ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('pruning', 'species', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- with no values, expect all shards SELECT prune_using_no_values('pruning'); - prune_using_no_values -------------------------------- + prune_using_no_values +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); - prune_using_single_value --------------------------- + prune_using_single_value +--------------------------------------------------------------------- {800002} (1 row) -- null values should result in no pruning SELECT prune_using_single_value('pruning', NULL); - prune_using_single_value -------------------------------- + prune_using_single_value +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- build an OR clause and expect more than one sahrd SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); - prune_using_either_value --------------------------- + prune_using_either_value +--------------------------------------------------------------------- {800002,800001} (1 row) -- an AND clause with values on different shards returns no shards SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); - prune_using_both_values -------------------------- + prune_using_both_values +--------------------------------------------------------------------- {} (1 row) -- even if both values are on the same shard, a value can't be equal to two others SELECT prune_using_both_values('pruning', 'tomato', 'rose'); - prune_using_both_values -------------------------- + prune_using_both_values +--------------------------------------------------------------------- {} (1 row) -- unit test of the equality 
expression generation code SELECT debug_equality_expression('pruning'); - debug_equality_expression --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + debug_equality_expression +--------------------------------------------------------------------- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1} (1 row) -- print the initial ordering of shard intervals SELECT print_sorted_shard_intervals('pruning'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) @@ -105,33 +105,33 @@ UPDATE pg_dist_shard set shardminvalue = -1073741824 WHERE shardid = 800001; -- create range distributed table observe shard pruning CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); SELECT create_distributed_table('pruning_range', 'species', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- create worker shards SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 800004 (1 row) SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 800005 (1 row) SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 800006 (1 row) SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 800007 (1 row) @@ -142,40 +142,40 @@ UPDATE pg_dist_shard SET shardminvalue = 'e', shardmaxvalue = 'f' WHERE shardid UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid = 800007; -- print the ordering of shard intervals with range partitioning as well SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL WHERE shardid = 800005; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800004,800006,800007,800005} (1 row) -- now lets have one more shard 
without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800006; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800004,800007,800005,800006} (1 row) -- now lets have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800004; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800007,800004,800005,800006} (1 row) -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800007; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals -------------------------------- + print_sorted_shard_intervals +--------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) @@ -187,24 +187,24 @@ CREATE TABLE coerce_hash ( value text NOT NULL ); SELECT create_distributed_table('coerce_hash', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO coerce_hash VALUES (1, 'test value'); -- All three of the following should return the same results... -- SELECT with same type as partition column SELECT * FROM coerce_hash WHERE id = 1::bigint; - id | value -----+------------ + id | value +--------------------------------------------------------------------- 1 | test value (1 row) -- SELECT with similar type to partition column SELECT * FROM coerce_hash WHERE id = 1; - id | value -----+------------ + id | value +--------------------------------------------------------------------- 1 | test value (1 row) @@ -216,14 +216,14 @@ SELECT * FROM coerce_hash WHERE id = 1; -- before the underlying issue was addressed. It looks like a boring -- test now, but if the old behavior is restored, it should crash again. SELECT * FROM coerce_hash WHERE id = 1.0; - id | value -----+------------ + id | value +--------------------------------------------------------------------- 1 | test value (1 row) SELECT * FROM coerce_hash WHERE id = 1.0::numeric; - id | value -----+------------ + id | value +--------------------------------------------------------------------- 1 | test value (1 row) diff --git a/src/test/regress/expected/multi_query_directory_cleanup.out b/src/test/regress/expected/multi_query_directory_cleanup.out index 5d3e38f5e..5ce27e966 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup.out +++ b/src/test/regress/expected/multi_query_directory_cleanup.out @@ -17,8 +17,8 @@ with silence as ( ) select count(*) * 0 zero from silence; - zero ------- + zero +--------------------------------------------------------------------- 0 (1 row) @@ -27,62 +27,62 @@ BEGIN; -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. 
SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) -- Test that multiple job directories are all cleaned up correctly, @@ -91,147 +91,147 @@ SELECT pg_ls_dir('base/pgsql_job_cache'); BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; - revenue 
---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f ---- + f +--------------------------------------------------------------------- (0 rows) -- close first, 17th (first after re-alloc) and last cursor. 
@@ -239,13 +239,13 @@ CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f ---- + f +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_query_directory_cleanup_0.out b/src/test/regress/expected/multi_query_directory_cleanup_0.out index ed98c36b4..37fbcc364 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup_0.out +++ b/src/test/regress/expected/multi_query_directory_cleanup_0.out @@ -17,8 +17,8 @@ with silence as ( ) select count(*) * 0 zero from silence; - zero ------- + zero +--------------------------------------------------------------------- 0 (1 row) @@ -27,62 +27,62 @@ BEGIN; -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) -- Test that multiple job directories are all cleaned up correctly, @@ -91,147 +91,147 @@ SELECT pg_ls_dir('base/pgsql_job_cache'); BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as 
revenue FROM lineitem; FETCH 1 FROM c_02; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR 
FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; - revenue ---------------- + revenue +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f ------------------ + f +--------------------------------------------------------------------- master_job_0007 master_job_0008 master_job_0009 @@ -259,8 +259,8 @@ CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f ------------------ + f +--------------------------------------------------------------------- master_job_0008 master_job_0009 master_job_0010 @@ -282,7 +282,7 @@ SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir ------------ + pg_ls_dir +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_read_from_secondaries.out b/src/test/regress/expected/multi_read_from_secondaries.out index 31572c204..a1425e0d9 100644 --- a/src/test/regress/expected/multi_read_from_secondaries.out +++ b/src/test/regress/expected/multi_read_from_secondaries.out @@ -8,15 +8,15 @@ ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('source_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO dest_table (a, b) VALUES (1, 1); @@ -24,8 +24,8 @@ INSERT INTO dest_table (a, b) VALUES (2, 1); INSERT INTO source_table (a, b) VALUES (10, 10); -- simluate actually having secondary nodes SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node; - nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster ---------+---------+-----------+----------+----------+----------+----------+------------- + nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | primary | default 2 | 2 | localhost | 57638 | default | t | primary | default (2 rows) @@ -38,15 +38,15 @@ ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' -- router selects are allowed SELECT a FROM dest_table WHERE a = 1 ORDER BY 1; - a ---- + a +--------------------------------------------------------------------- 1 (1 row) -- real-time selects are also allowed SELECT a FROM dest_table ORDER BY 1; - a ---- + a 
+--------------------------------------------------------------------- 1 2 (2 rows) @@ -58,20 +58,20 @@ SELECT FROM ( WITH cte AS ( - SELECT - DISTINCT dest_table.a - FROM - dest_table, source_table - WHERE - source_table.a = dest_table.a AND + SELECT + DISTINCT dest_table.a + FROM + dest_table, source_table + WHERE + source_table.a = dest_table.a AND dest_table.b IN (1,2,3,4) ) SELECT * FROM cte ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1; -DEBUG: generating subplan 4_1 for CTE cte: SELECT DISTINCT dest_table.a FROM public.dest_table, public.source_table WHERE ((source_table.a OPERATOR(pg_catalog.=) dest_table.a) AND (dest_table.b OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 4_2 for subquery SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte ORDER BY a DESC LIMIT 5 -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo ORDER BY a - a ---- +DEBUG: generating subplan XXX_1 for CTE cte: SELECT DISTINCT dest_table.a FROM public.dest_table, public.source_table WHERE ((source_table.a OPERATOR(pg_catalog.=) dest_table.a) AND (dest_table.b OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte ORDER BY a DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo ORDER BY a + a +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out index fbbddbc0d..795ce8464 100644 --- a/src/test/regress/expected/multi_real_time_transaction.out +++ b/src/test/regress/expected/multi_real_time_transaction.out @@ -7,40 +7,40 @@ SET search_path = 'multi_real_time_transaction'; SET citus.shard_replication_factor to 1; CREATE TABLE test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('test_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY test_table FROM stdin delimiter ','; CREATE TABLE co_test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('co_test_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY co_test_table FROM stdin delimiter ','; CREATE TABLE ref_test_table(id int, col_1 int, col_2 text); SELECT create_reference_table('ref_test_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \COPY ref_test_table FROM stdin delimiter ','; -- Test with select and router insert BEGIN; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES(7,8,'gg'); SELECT COUNT(*) 
FROM test_table; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) @@ -48,15 +48,15 @@ ROLLBACK; -- Test with select and multi-row insert BEGIN; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES (7,8,'gg'),(8,9,'hh'),(9,10,'ii'); SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 9 (1 row) @@ -64,15 +64,15 @@ ROLLBACK; -- Test with INSERT .. SELECT BEGIN; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table SELECT * FROM co_test_table; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 12 (1 row) @@ -80,15 +80,15 @@ ROLLBACK; -- Test with COPY BEGIN; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) \COPY test_table FROM stdin delimiter ','; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 9 (1 row) @@ -96,16 +96,16 @@ ROLLBACK; -- Test with router update BEGIN; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 27 (1 row) UPDATE test_table SET col_1 = 0 WHERE id = 2; DELETE FROM test_table WHERE id = 3; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 20 (1 row) @@ -113,15 +113,15 @@ ROLLBACK; -- Test with multi-shard update BEGIN; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 27 (1 row) UPDATE test_table SET col_1 = 5; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 30 (1 row) @@ -129,8 +129,8 @@ ROLLBACK; -- Test with subqueries BEGIN; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 27 (1 row) @@ -142,8 +142,8 @@ WHERE test_table.col_1 IN (SELECT co_test_table.col_1 FROM co_test_table WHERE co_test_table.id = 1) AND test_table.id = 1; SELECT SUM(col_1) FROM test_table; - sum ------ + sum +--------------------------------------------------------------------- 29 (1 row) @@ -160,23 +160,23 @@ INSERT INTO partitioning_test VALUES (2, '2010-07-07'); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; SELECT COUNT(*) FROM partitioning_test; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT COUNT(*) FROM partitioning_test; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -186,15 +186,15 @@ DROP TABLE partitioning_test; BEGIN; CREATE TABLE test_table_inn(id int, num_1 int); SELECT create_distributed_table('test_table_inn','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_inn VALUES(1,3),(4,5),(6,7); SELECT COUNT(*) FROM test_table_inn; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -203,32 +203,32 @@ COMMIT; -- Test with utility functions BEGIN; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) CREATE INDEX tt_ind_1 ON test_table(col_1); ALTER TABLE test_table ADD CONSTRAINT num_check CHECK (col_1 < 50); SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) ROLLBACK; -- We don't get a distributed transaction id outside a transaction block SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; - ?column? ----------- + ?column? +--------------------------------------------------------------------- f (1 row) -- We should get a distributed transaction id inside a transaction block BEGIN; SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- t (1 row) @@ -244,23 +244,23 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; $$); - run_command_on_master_and_workers ------------------------------------ - + run_command_on_master_and_workers +--------------------------------------------------------------------- + (1 row) -- SELECT should be rolled back because we send BEGIN BEGIN; SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) -- Sneakily insert directly into shards SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; - insert_row_test ------------------ + insert_row_test +--------------------------------------------------------------------- t t t @@ -270,15 +270,15 @@ SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; (6 rows) SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 12 (1 row) ABORT; SELECT count(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -288,8 +288,8 @@ ALTER TABLE co_test_table ADD CONSTRAINT f_key_ctt FOREIGN KEY (id) REFERENCES t BEGIN; DELETE FROM test_table where id = 1 or id = 3; SELECT * FROM co_test_table; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -299,8 +299,8 @@ ROLLBACK; SET client_min_messages TO ERROR; alter system set deadlock_timeout TO '250ms'; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -348,8 +348,8 @@ ROLLBACK; -- gonna need a non-superuser as we'll use RLS to test GUC propagation CREATE USER rls_user; SELECT run_command_on_workers('CREATE USER rls_user'); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -357,15 +357,15 @@ SELECT run_command_on_workers('CREATE USER rls_user'); GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers ---------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers ---------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) @@ -381,8 +381,8 @@ SET ROLE rls_user; SET search_path = 'multi_real_time_transaction'; -- shouldn't see all rows because of RLS SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -390,8 +390,8 @@ BEGIN; -- without enabling SET LOCAL prop, still won't work SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ 
-399,23 +399,23 @@ SET LOCAL citus.propagate_set_commands TO 'local'; -- now we should be good to go SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) SAVEPOINT disable_rls; SET LOCAL app.show_rows TO FALSE; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) ROLLBACK TO SAVEPOINT disable_rls; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -423,8 +423,8 @@ SAVEPOINT disable_rls_for_real; SET LOCAL app.show_rows TO FALSE; RELEASE SAVEPOINT disable_rls_for_real; SELECT COUNT(*) FROM test_table; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -436,8 +436,8 @@ RESET ROLE; SET ROLE rls_user; SET search_path = 'multi_real_time_transaction'; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' 2 | 30 | 'bb10' @@ -448,34 +448,34 @@ SELECT * FROM co_test_table ORDER BY id, col_1; \c - - - :worker_1_port SET search_path = 'multi_real_time_transaction'; --- shard 1610004 contains data from tenant id 1 +-- shard xxxxx contains data from tenant id 1 SELECT * FROM co_test_table_1610004 ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) SELECT * FROM co_test_table_1610006 ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+------- + id | col_1 | col_2 +--------------------------------------------------------------------- (0 rows) \c - - - :worker_2_port SET search_path = 'multi_real_time_transaction'; --- shard 1610005 contains data from tenant id 3 +-- shard xxxxx contains data from tenant id 3 SELECT * FROM co_test_table_1610005 ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' (3 rows) --- shard 1610007 contains data from tenant id 2 +-- shard xxxxx contains data from tenant id 2 SELECT * FROM co_test_table_1610007 ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -488,8 +488,8 @@ CREATE POLICY filter_by_tenant_id ON co_test_table TO PUBLIC SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', $cmd$CREATE POLICY filter_by_tenant_id ON %s TO PUBLIC USING (id = ANY(string_to_array(current_setting('app.tenant_id'), ',')::int[]));$cmd$); - run_command_on_shards ------------------------------ + run_command_on_shards +--------------------------------------------------------------------- (1610004,t,"CREATE POLICY") (1610005,t,"CREATE POLICY") (1610006,t,"CREATE POLICY") @@ -501,8 +501,8 @@ SET citus.enable_ddl_propagation to off; ALTER TABLE co_test_table ENABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s ENABLE ROW LEVEL SECURITY;'); - run_command_on_shards ---------------------------- + run_command_on_shards 
+--------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") (1610006,t,"ALTER TABLE") @@ -517,8 +517,8 @@ SET LOCAL citus.propagate_set_commands TO 'local'; -- Only tenant id 1 will be fetched, and so on. SET LOCAL app.tenant_id TO 1; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) @@ -526,8 +526,8 @@ SELECT * FROM co_test_table ORDER BY id, col_1; SAVEPOINT disable_rls; SET LOCAL app.tenant_id TO 3; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' @@ -535,8 +535,8 @@ SELECT * FROM co_test_table ORDER BY id, col_1; ROLLBACK TO SAVEPOINT disable_rls; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) @@ -545,8 +545,8 @@ SAVEPOINT disable_rls_for_real; SET LOCAL app.tenant_id TO 3; RELEASE SAVEPOINT disable_rls_for_real; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' @@ -557,8 +557,8 @@ RELEASE SAVEPOINT disable_rls; -- via RLS policies that use GUCs. SET LOCAL app.tenant_id TO '1,3'; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 -----+-------+-------- + id | col_1 | col_2 +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' 3 | 4 | 'cc1' @@ -573,8 +573,8 @@ SET citus.enable_ddl_propagation to off; ALTER TABLE co_test_table DISABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s DISABLE ROW LEVEL SECURITY;'); - run_command_on_shards ---------------------------- + run_command_on_shards +--------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") (1610006,t,"ALTER TABLE") @@ -585,8 +585,8 @@ SET citus.enable_ddl_propagation to off; DROP POLICY filter_by_tenant_id ON co_test_table; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', 'DROP POLICY filter_by_tenant_id ON %s;'); - run_command_on_shards ---------------------------- + run_command_on_shards +--------------------------------------------------------------------- (1610004,t,"DROP POLICY") (1610005,t,"DROP POLICY") (1610006,t,"DROP POLICY") @@ -598,22 +598,22 @@ SELECT run_command_on_shards('co_test_table', 'DROP POLICY filter_by_tenant_id O BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id, pg_advisory_lock(15) FROM test_table ORDER BY 1 DESC; - id | pg_advisory_lock -----+------------------ - 6 | - 5 | - 4 | - 3 | - 2 | - 1 | + id | pg_advisory_lock +--------------------------------------------------------------------- + 6 | + 5 | + 4 | + 3 | + 2 | + 1 | (6 rows) ROLLBACK; SET client_min_messages TO DEFAULT; alter system set deadlock_timeout TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) @@ -623,14 
+623,14 @@ SET citus.select_opens_transaction_block TO off; -- we use a different advisory lock because previous tests -- still holds the advisory locks since the sessions are still active SELECT id, pg_advisory_xact_lock(16) FROM test_table ORDER BY id; - id | pg_advisory_xact_lock -----+----------------------- - 1 | - 2 | - 3 | - 4 | - 5 | - 6 | + id | pg_advisory_xact_lock +--------------------------------------------------------------------- + 1 | + 2 | + 3 | + 4 | + 5 | + 6 | (6 rows) END; diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out index ce740f8b4..4a4f79b7f 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -5,9 +5,9 @@ INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); -- create the reference table SELECT create_reference_table('reference_table_test'); NOTICE: Copying data from local table... - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- see that partkey is NULL @@ -17,8 +17,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'reference_table_test'::regclass; - partmethod | partkeyisnull | repmodel -------------+---------------+---------- + partmethod | partkeyisnull | repmodel +--------------------------------------------------------------------- n | t | t (1 row) @@ -29,8 +29,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1250000 | t | t (1 row) @@ -43,15 +43,15 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | all_placements_healthy | replicated_to_all ----------+------------------------+------------------- + shardid | all_placements_healthy | replicated_to_all +--------------------------------------------------------------------- 1250000 | t | t (1 row) -- check whether data was copied into distributed table SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -68,8 +68,8 @@ SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -83,8 +83,8 @@ FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -95,8 +95,8 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 3; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -110,8 +110,8 @@ WHERE value_2 >= 4 ORDER BY 2 
LIMIT 3; - value_1 | value_3 ----------+--------- + value_1 | value_3 +--------------------------------------------------------------------- 4 | 4 5 | 5 (2 rows) @@ -123,8 +123,8 @@ FROM ORDER BY 2 ASC LIMIT 2; - value_1 | ?column? ----------+---------- + value_1 | ?column? +--------------------------------------------------------------------- 1 | 15 2 | 30 (2 rows) @@ -135,8 +135,8 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; - value_1 | ?column? ----------+---------- + value_1 | ?column? +--------------------------------------------------------------------- 3 | 45 4 | 60 (2 rows) @@ -147,8 +147,8 @@ FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -159,8 +159,8 @@ FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; - value_2 | value_4 ----------+--------- + value_2 | value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -169,8 +169,8 @@ FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -184,8 +184,8 @@ WHERE value_3 = '2' OR value_1 = 3 ) AND FALSE; - value_2 | value_4 ----------+--------- + value_2 | value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -201,8 +201,8 @@ WHERE reference_table_test ) AND value_1 < 3; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -216,8 +216,8 @@ WHERE ( '1', '2' ); - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) @@ -231,8 +231,8 @@ WHERE ( '5', '2' ); - date_part ------------ + date_part +--------------------------------------------------------------------- 2 5 (2 rows) @@ -243,8 +243,8 @@ FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; - value_4 ---------- + value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -253,8 +253,8 @@ FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) @@ -265,8 +265,8 @@ FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); - value_4 ---------- + value_4 +--------------------------------------------------------------------- (0 rows) SELECT @@ -275,8 +275,8 @@ FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -288,8 +288,8 @@ FROM reference_table_test WHERE FALSE; - value_1 ---------- + value_1 +--------------------------------------------------------------------- (0 rows) SELECT @@ -298,8 +298,8 @@ FROM reference_table_test WHERE int4eq(1, 2); - value_1 ---------- + value_1 
+--------------------------------------------------------------------- (0 rows) -- rename output name and do some operations @@ -307,8 +307,8 @@ SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; - id | age -----+----- + id | age +--------------------------------------------------------------------- 1 | 15 2 | 30 3 | 45 @@ -322,8 +322,8 @@ SELECT * FROM some_data; - value_2 | value_4 ----------+-------------------------- + value_2 | value_4 +--------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 @@ -332,8 +332,8 @@ FROM -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -344,8 +344,8 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- + value_1 | value_2 | value_3 | value_4 | position +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -355,8 +355,8 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; - value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- + value_1 | value_2 | value_3 | value_4 | position +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) @@ -368,8 +368,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -380,8 +380,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -391,8 +391,8 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) -- to make the tests more interested for aggregation tests, ingest some more data @@ -410,8 +410,8 @@ HAVING SUM(value_2) > 3 ORDER BY 1; - value_4 | sum ---------------------------+----- + value_4 | sum +--------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 @@ -427,8 +427,8 @@ FROM GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; - value_4 | value_3 | sum 
---------------------------+---------+----- + value_4 | value_3 | sum +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 @@ -448,8 +448,8 @@ FROM reference_table_test ORDER BY 1; - value_4 --------------------------- + value_4 +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 00:00:00 2016 @@ -462,8 +462,8 @@ SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; - value_4 | rank ---------------------------+------ + value_4 | rank +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 @@ -479,8 +479,8 @@ SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; - value_4 | avg ---------------------------+------------------------ + value_4 | avg +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 @@ -502,8 +502,8 @@ SELECT END) as c FROM reference_table_test; - c ---- + c +--------------------------------------------------------------------- 3 (1 row) @@ -523,8 +523,8 @@ SELECT value_1 ORDER BY 1; - value_1 | c ----------+--- + value_1 | c +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 1 @@ -535,8 +535,8 @@ SELECT -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -548,8 +548,8 @@ SELECT * FROM reference_table_test; (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -563,27 +563,27 @@ DECLARE test_cursor CURSOR FOR WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (3 rows) FETCH test_cursor; -- fetch one row after the last - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + 
value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -597,16 +597,16 @@ CREATE TEMP TABLE temp_reference_test as -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- ingest some data to both tables @@ -624,8 +624,8 @@ WHERE t1.value_2 = t2.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -639,8 +639,8 @@ WHERE t1.value_2 = t3.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 4 5 (2 rows) @@ -653,8 +653,8 @@ WHERE t2.value_2 = t3.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- (0 rows) -- join on different columns and different data types via casts @@ -666,8 +666,8 @@ WHERE t1.value_2 = t2.value_1 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -681,8 +681,8 @@ WHERE t1.value_2 = t2.value_3::int ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -696,8 +696,8 @@ WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -713,8 +713,8 @@ WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -727,8 +727,8 @@ WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -740,8 +740,8 @@ FROM JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 (1 row) @@ -753,8 +753,8 @@ FROM LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 3 @@ -769,18 +769,18 @@ FROM RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 3 - + (2 rows) -- now, lets have some tests on UPSERTs and uniquness CREATE TABLE reference_table_test_fourth (value_1 int, value_2 float PRIMARY KEY, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fourth'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \set VERBOSITY terse @@ -795,15 +795,15 @@ ERROR: null value 
in column "value_2" violates not-null constraint \set VERBOSITY default -- lets run some upserts INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01') ON CONFLICT DO NOTHING RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3, value_2 = EXCLUDED.value_2 RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 10 | Thu Dec 01 00:00:00 2016 (1 row) @@ -811,8 +811,8 @@ INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON C INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3 || '+10', value_2 = EXCLUDED.value_2 + 10, value_1 = EXCLUDED.value_1 + 10, value_4 = '2016-12-10' RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 11 | 11 | 10+10 | Sat Dec 10 00:00:00 2016 (1 row) @@ -825,8 +825,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test_fourth'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | all_placements_healthy | replicated_to_all ----------+------------------------+------------------- + shardid | all_placements_healthy | replicated_to_all +--------------------------------------------------------------------- 1250003 | t | t (1 row) @@ -836,8 +836,8 @@ DELETE FROM WHERE value_1 = 1 RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -847,8 +847,8 @@ DELETE FROM WHERE value_4 = '2016-12-05' RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (1 row) @@ -859,8 +859,8 @@ SET WHERE value_2 = 2 RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -871,8 +871,8 @@ UPDATE SET value_2 = 15, value_1 = 45 RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 @@ -883,8 +883,8 @@ RETURNING *; DELETE FROM reference_table_test RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 
+--------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 @@ -895,9 +895,9 @@ RETURNING *; -- some tests with function evaluation and sequences CREATE TABLE reference_table_test_fifth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fifth'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE SEQUENCE example_ref_value_seq; @@ -905,24 +905,24 @@ CREATE SEQUENCE example_ref_value_seq; INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 2 (1 row) INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 2 | 2 (1 row) INSERT INTO reference_table_test_fifth (value_2, value_3) VALUES (nextval('example_ref_value_seq'), nextval('example_ref_value_seq')::text) RETURNING value_1, value_2, value_3; - value_1 | value_2 | value_3 ----------+---------+--------- + value_1 | value_2 | value_3 +--------------------------------------------------------------------- 3 | 1 | 2 (1 row) @@ -931,8 +931,8 @@ UPDATE WHERE value_1 = 1 RETURNING value_1, value_2, value_4 > '2000-01-01'; - value_1 | value_2 | ?column? ----------+---------+---------- + value_1 | value_2 | ?column? +--------------------------------------------------------------------- 1 | 2 | t (1 row) @@ -959,12 +959,12 @@ INSERT INTO FROM reference_table_test RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Fri Jan 01 00:00:00 2016 | 2 | 2 | Sat Jan 02 00:00:00 2016 - | | 3 | - | | | + | | 3 | + | | | (4 rows) INSERT INTO @@ -974,25 +974,25 @@ INSERT INTO FROM reference_table_test JOIN reference_table_test_second USING (value_1) RETURNING *; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- - | 1 | | + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + | 1 | | (1 row) SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 2; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DELETE FROM reference_table_test; @@ -1011,8 +1011,8 @@ FROM WHERE colocated_table_test.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_1 ---------- + value_1 
+--------------------------------------------------------------------- 1 2 (2 rows) @@ -1024,8 +1024,8 @@ FROM WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1037,8 +1037,8 @@ FROM WHERE reference_table_test.value_1 = colocated_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1052,8 +1052,8 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 1 2 @@ -1068,8 +1068,8 @@ FROM WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1082,8 +1082,8 @@ FROM WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1095,8 +1095,8 @@ FROM WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 ---------- + value_2 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1138,8 +1138,8 @@ FROM WHERE colocated_table_test_2.value_4 = reference_table_test.value_4 RETURNING value_1, value_2; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1154,8 +1154,8 @@ FROM WHERE colocated_table_test_2.value_1 > reference_table_test.value_2 RETURNING value_1, value_2; - value_1 | value_2 ----------+--------- + value_1 | value_2 +--------------------------------------------------------------------- 2 | 1 (1 row) @@ -1190,9 +1190,9 @@ ERROR: cannot colocate tables colocated_table_test_2 and reference_table_test DETAIL: Replication models don't match for colocated_table_test_2 and reference_table_test. 
-- should work sliently SELECT mark_tables_colocated('reference_table_test', ARRAY['reference_table_test_fifth']); - mark_tables_colocated ------------------------ - + mark_tables_colocated +--------------------------------------------------------------------- + (1 row) -- ensure that reference tables on @@ -1201,18 +1201,18 @@ CREATE SCHEMA reference_schema; -- create with schema prefix CREATE TABLE reference_schema.reference_table_test_sixth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_test_sixth'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SET search_path TO 'reference_schema'; -- create on the schema CREATE TABLE reference_table_test_seventh (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_seventh'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- ingest some data @@ -1227,8 +1227,8 @@ SELECT value_1 FROM reference_schema.reference_table_test_sixth; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1240,8 +1240,8 @@ FROM reference_table_test_sixth, reference_table_test_seventh WHERE reference_table_test_sixth.value_4 = reference_table_test_seventh.value_4; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1254,8 +1254,8 @@ FROM colocated_table_test_2, reference_schema.reference_table_test_sixth as reftable WHERE colocated_table_test_2.value_4 = reftable.value_4; - value_2 | value_1 ----------+--------- + value_2 | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1272,8 +1272,8 @@ SELECT count(*) FROM reference_table_test; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -1283,42 +1283,42 @@ SELECT count(*) FROM reference_table_test; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- now try dropping one of the existing reference tables -- and check the metadata SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid ----------------------------- + logicalrelid +--------------------------------------------------------------------- reference_table_test_fifth (1 row) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid ----------------------------- + logicalrelid +--------------------------------------------------------------------- reference_table_test_fifth (1 row) DROP TABLE reference_table_test_fifth; SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid --------------- + logicalrelid +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid --------------- + logicalrelid +--------------------------------------------------------------------- (0 rows) -- now test DDL changes CREATE TABLE 
reference_schema.reference_table_ddl (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_ddl'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- CREATE & DROP index and check the workers @@ -1334,18 +1334,18 @@ ALTER TABLE reference_schema.reference_table_ddl ALTER COLUMN value_2 SET DEFAUL ALTER TABLE reference_schema.reference_table_ddl ALTER COLUMN value_3 SET NOT NULL; -- see that Citus applied all DDLs to the table SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl'::regclass; - Column | Type | Modifiers ----------+-----------------------------+-------------- + Column | Type | Modifiers +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time zone | + value_5 | double precision | (4 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2'::regclass; - Column | Type | Definition ----------+------------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- value_2 | double precision | value_2 value_3 | text | value_3 (2 rows) @@ -1353,18 +1353,18 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE -- also to the shard placements \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; - Column | Type | Modifiers ----------+-----------------------------+-------------- + Column | Type | Modifiers +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time zone | + value_5 | double precision | (4 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2_1250019'::regclass; - Column | Type | Definition ----------+------------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- value_2 | double precision | value_2 value_3 | text | value_3 (2 rows) @@ -1373,18 +1373,18 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE DROP INDEX reference_schema.reference_index_2; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; - Column | Type | Modifiers ----------+-----------------------------+-------------- + Column | Type | Modifiers +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time zone | + value_5 | double precision | (4 rows) \di reference_schema.reference_index_2* List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- + Schema | Name | Type | Owner | Table +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -1404,15 +1404,15 @@ DETAIL: We currently don't support 
creating shards on reference tables SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('reference_schema.reference_table_ddl'); - part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------+----------+--------------------+---------------+----------------------- + part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy +--------------------------------------------------------------------- t | | 2 | 1536000 | 2 (1 row) SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_schema.reference_table_ddl'::regclass \gset SELECT master_update_shard_statistics(:a_shard_id); - master_update_shard_statistics --------------------------------- + master_update_shard_statistics +--------------------------------------------------------------------- 8192 (1 row) @@ -1421,8 +1421,8 @@ SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', ERROR: cannot append to shardId 1250019 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl'); - master_get_table_ddl_events ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- CREATE TABLE reference_schema.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision) ALTER TABLE reference_schema.reference_table_ddl OWNER TO postgres (2 rows) @@ -1433,14 +1433,14 @@ SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id; - shardid | shardstate ----------+------------ + shardid | shardstate +--------------------------------------------------------------------- 1250019 | 1 (1 row) @@ -1457,50 +1457,50 @@ RETURNS void AS ' ' LANGUAGE SQL; TRUNCATE reference_table_test; SELECT select_count_all(); - select_count_all ------------------- + select_count_all +--------------------------------------------------------------------- 0 (1 row) SELECT insert_into_ref_table(1, 1.0, '1', '2016-12-01'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + (1 row) SELECT insert_into_ref_table(2, 2.0, '2', '2016-12-02'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + (1 row) SELECT insert_into_ref_table(3, 3.0, '3', '2016-12-03'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + 
(1 row) SELECT insert_into_ref_table(4, 4.0, '4', '2016-12-04'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + (1 row) SELECT insert_into_ref_table(5, 5.0, '5', '2016-12-05'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + (1 row) SELECT insert_into_ref_table(6, 6.0, '6', '2016-12-06'); - insert_into_ref_table ------------------------ - + insert_into_ref_table +--------------------------------------------------------------------- + (1 row) SELECT select_count_all(); - select_count_all ------------------- + select_count_all +--------------------------------------------------------------------- 6 (1 row) @@ -1517,8 +1517,8 @@ EXECUTE insert_into_ref_table_pr(5, 5.0, '5', '2016-12-05'); EXECUTE insert_into_ref_table_pr(6, 6.0, '6', '2016-12-06'); -- see the count, then truncate the table SELECT select_count_all(); - select_count_all ------------------- + select_count_all +--------------------------------------------------------------------- 6 (1 row) @@ -1530,24 +1530,24 @@ TRUNCATE reference_table_test; CREATE TYPE reference_comp_key as (key text, value text); CREATE TABLE reference_table_composite (id int PRIMARY KEY, data reference_comp_key); SELECT create_reference_table('reference_table_composite'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- insert and query some data INSERT INTO reference_table_composite (id, data) VALUES (1, ('key_1', 'value_1')::reference_comp_key); INSERT INTO reference_table_composite (id, data) VALUES (2, ('key_2', 'value_2')::reference_comp_key); SELECT * FROM reference_table_composite; - id | data -----+----------------- + id | data +--------------------------------------------------------------------- 1 | (key_1,value_1) 2 | (key_2,value_2) (2 rows) SELECT (data).key FROM reference_table_composite; - key -------- + key +--------------------------------------------------------------------- key_1 key_2 (2 rows) @@ -1557,15 +1557,15 @@ TRUNCATE reference_table_test; BEGIN; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) ROLLBACK; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) -- now insert a row and commit @@ -1573,8 +1573,8 @@ BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); COMMIT; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -1583,8 +1583,8 @@ BEGIN; UPDATE reference_table_test SET value_1 = 10 WHERE value_1 = 2; COMMIT; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- + value_1 | value_2 | value_3 | 
value_4 +--------------------------------------------------------------------- 10 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out index 662463386..28fd7f502 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -11,32 +11,32 @@ CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- remove non-existing node SELECT master_remove_node('localhost', 55555); -ERROR: node at "localhost:55555" does not exist +ERROR: node at "localhost:xxxxx" does not exist -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- verify node is removed SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -45,81 +45,81 @@ SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset -- add a secondary to check we don't attempt to replicate the table to it SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- remove a node with reference table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- make sure when we add a secondary we don't attempt to add placements to it SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- make sure when we disable a secondary we don't remove any placements SELECT master_disable_node('localhost', 9001); - master_disable_node ---------------------- - + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; - isactive ----------- + isactive +--------------------------------------------------------------------- f (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- make sure when we activate a secondary we don't add any placements SELECT 1 FROM master_activate_node('localhost', 9001); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- make sure when we remove a secondary we don't remove any placements SELECT master_remove_node('localhost', 9001); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -129,8 +129,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -140,15 +140,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -158,22 +158,22 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - 
master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -183,8 +183,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -193,15 +193,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -211,48 +211,48 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- remove same node twice SELECT master_remove_node('localhost', :worker_2_port); -ERROR: node at "localhost:57638" does not exist +ERROR: node at "localhost:xxxxx" does not exist -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) -- try to disable the node before removing it (this used to crash) SELECT master_disable_node('localhost', :worker_2_port); - master_disable_node ---------------------- - + master_disable_node +--------------------------------------------------------------------- + (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- re-add the node for the next test SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- remove node in a transaction and ROLLBACK -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -262,8 +262,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -273,15 +273,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -291,24 +291,24 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -318,8 +318,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -329,15 +329,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -347,8 +347,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength 
| nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -356,8 +356,8 @@ WHERE -- remove node in a transaction and COMMIT -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -367,8 +367,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -378,15 +378,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -396,24 +396,24 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -423,8 +423,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -433,15 +433,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count 
+--------------------------------------------------------------------- 0 (1 row) @@ -451,24 +451,24 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) -- test inserting a value then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -478,8 +478,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -489,15 +489,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -507,8 +507,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -516,16 +516,16 @@ WHERE BEGIN; INSERT INTO remove_node_reference_table VALUES(1); SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -535,8 +535,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -545,22 +545,22 @@ WHERE colocationid IN (SELECT colocationid FROM 
pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) --verify the data is inserted SELECT * FROM remove_node_reference_table; - column1 ---------- + column1 +--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -570,30 +570,30 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * FROM remove_node_reference_table; - column1 ---------- + column1 +--------------------------------------------------------------------- 1 (1 row) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) -- test executing DDL command then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -603,8 +603,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -614,15 +614,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -632,8 +632,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 
| localhost | 57638 (1 row) @@ -641,16 +641,16 @@ WHERE BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -660,8 +660,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -670,15 +670,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -688,33 +688,33 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port SET citus.next_shard_id TO 1380001; -- verify table structure is changed SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass; - Column | Type | Modifiers ----------+---------+----------- - column1 | integer | - column2 | integer | + Column | Type | Modifiers +--------------------------------------------------------------------- + column1 | integer | + column2 | integer | (2 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- test DROP table after removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -724,8 +724,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -735,24 +735,24 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) DROP TABLE remove_node_reference_table; COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -762,43 +762,43 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- re-create remove_node_reference_table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- test removing a node while there is a reference table at another schema CREATE SCHEMA remove_node_reference_table_schema; CREATE TABLE remove_node_reference_table_schema.table1(column1 int); SELECT create_reference_table('remove_node_reference_table_schema.table1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -810,8 +810,8 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -822,15 +822,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -842,23 +842,23 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -868,8 +868,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -878,15 +878,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 
'remove_node_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -896,25 +896,25 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 -NOTICE: Replicating reference table "table1" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) -- test with master_disable_node -- status before master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -926,8 +926,8 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -938,15 +938,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -957,23 +957,23 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid ASC; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) \c - - - :master_port SELECT master_disable_node('localhost', :worker_2_port); - master_disable_node ---------------------- - + master_disable_node 
+--------------------------------------------------------------------- + (1 row) -- status after master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -983,8 +983,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -993,15 +993,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -1011,17 +1011,17 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 -NOTICE: Replicating reference table "table1" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -1030,9 +1030,9 @@ DROP TABLE remove_node_reference_table; DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- reload pg_dist_shard_placement table diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index 17a9c45eb..44ce0818f 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -15,9 +15,9 @@ CREATE INDEX ON customer_engagements (event_data); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- ingest some data for the tests @@ -27,7 +27,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event'); -- the following queries does the following: -- (i) create a new shard -- (ii) mark the second shard placements as unhealthy --- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones +-- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones -- (iv) do a successful master_copy_shard_placement from the first placement to the second -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -- get the newshardid @@ -50,18 +50,18 @@ ROLLBACK; -- modifications after reparing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) ALTER TABLE customer_engagements ADD COLUMN value float; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); @@ -76,17 +76,17 @@ SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'lo ERROR: source placement must be in finalized state -- "copy" this shard from the first placement to the second one SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed to there UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_1_group; -- get the data from the second placement SELECT * FROM customer_engagements; - id | created_at | event_data -----+------------+-------------- + id | created_at | event_data 
+--------------------------------------------------------------------- 1 | 01-01-2015 | first event 2 | 02-01-2015 | second event 1 | 03-01-2015 | third event @@ -104,9 +104,9 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('remote_engagements', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- get the newshardid diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index 7290429d4..3f3340469 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -28,15 +28,15 @@ CREATE TABLE stock ( PRIMARY KEY (s_w_id,s_i_id) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -71,8 +71,8 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for task 2 @@ -83,8 +83,8 @@ DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -103,17 +103,17 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node 
localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 - l_partkey | o_orderkey | count ------------+------------+------- + l_partkey | o_orderkey | count +--------------------------------------------------------------------- 18 | 12005 | 1 79 | 5121 | 1 91 | 2883 | 1 @@ -161,14 +161,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -205,18 +205,18 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 - l_partkey | o_orderkey | count ------------+------------+------- + l_partkey | o_orderkey | count +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows o_shippriority to be in the target list @@ -235,14 +235,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned 
task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -279,18 +279,18 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 7 DEBUG: completed cleanup query for job 7 DEBUG: completed cleanup query for job 8 DEBUG: completed cleanup query for job 8 - o_orderkey | o_shippriority | count -------------+----------------+------- + o_orderkey | o_shippriority | count +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows o_shippriority to be in the target @@ -311,14 +311,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -355,18 +355,18 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 10 DEBUG: completed cleanup query for job 10 DEBUG: completed cleanup query for job 11 DEBUG: completed cleanup query for job 11 - o_orderkey | o_shippriority | count -------------+----------------+------- + o_orderkey | o_shippriority | count +--------------------------------------------------------------------- (0 rows) -- Check that calling any_value manually works as well @@ -385,14 +385,14 @@ 
DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -429,18 +429,18 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 13 DEBUG: completed cleanup query for job 13 DEBUG: completed cleanup query for job 14 DEBUG: completed cleanup query for job 14 - o_orderkey | any_value -------------+----------- + o_orderkey | any_value +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows s_quantity to be in the having @@ -461,10 +461,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690006 stock WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690007 stock WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT ol_i_id FROM order_line_690000 order_line WHERE true" DEBUG: generated sql query for task 2 @@ -473,10 +473,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT ol_i_id FROM order_line_690002 order_line WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT ol_i_id FROM order_line_690003 order_line WHERE true" -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: 
join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -513,18 +513,18 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 16 DEBUG: completed cleanup query for job 16 DEBUG: completed cleanup query for job 17 DEBUG: completed cleanup query for job 17 - s_i_id --------- + s_i_id +--------------------------------------------------------------------- (0 rows) -- Reset client logging level to its previous value diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out index aab563930..b32d88cd8 100644 --- a/src/test/regress/expected/multi_repartition_join_pruning.out +++ b/src/test/regress/expected/multi_repartition_join_pruning.out @@ -28,8 +28,8 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 3 @@ -58,8 +58,8 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) @@ -74,8 +74,8 @@ WHERE o_custkey = c_custkey AND o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -93,8 +93,8 @@ WHERE o_custkey = c_custkey AND o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -109,8 +109,8 @@ WHERE o_custkey = c_custkey AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -128,8 +128,8 @@ WHERE o_custkey = c_custkey AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. 
- count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -172,8 +172,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 @@ -221,8 +221,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 - count -------- + count +--------------------------------------------------------------------- 125 (1 row) @@ -237,8 +237,8 @@ WHERE l_partkey = c_nationkey AND l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN -------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -259,8 +259,8 @@ WHERE l_partkey = c_nationkey AND l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -273,8 +273,8 @@ FROM WHERE false; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN -------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries @@ -291,8 +291,8 @@ FROM WHERE false; DEBUG: Router planner does not support append-partitioned tables. - o_orderkey ------------- + o_orderkey +--------------------------------------------------------------------- (0 rows) EXPLAIN (COSTS OFF) @@ -303,8 +303,8 @@ FROM WHERE 1=0 AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN -------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries @@ -319,8 +319,8 @@ SELECT FROM orders INNER JOIN customer_append ON (o_custkey = c_custkey AND false); DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN ----------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All @@ -334,8 +334,8 @@ FROM WHERE o_custkey = c_custkey AND false; DEBUG: Router planner does not support append-partitioned tables. 
- QUERY PLAN ----------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All diff --git a/src/test/regress/expected/multi_repartition_join_ref.out b/src/test/regress/expected/multi_repartition_join_ref.out index b0fc05bda..9d14058ed 100644 --- a/src/test/regress/expected/multi_repartition_join_ref.out +++ b/src/test/regress/expected/multi_repartition_join_ref.out @@ -15,8 +15,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 195 | 196 | 804 245 | 246 | 754 278 | 279 | 721 @@ -43,8 +43,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -71,8 +71,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -98,8 +98,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ cartesian product reference join "supplier" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 18 | 7519 | 1000 79 | 7580 | 1000 91 | 2592 | 1000 @@ -126,8 +126,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -154,8 +154,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -182,8 +182,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count +--------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 91 | 2592 | 1 @@ -210,8 +210,8 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ reference join "supplier" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count ------------+-----------+------- + l_partkey | l_suppkey | count 
+--------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 91 | 2592 | 1 diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index f504ce3fb..d3146999d 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -18,8 +18,8 @@ FROM WHERE o_custkey = c_custkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -32,11 +32,11 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 - count -------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + count +--------------------------------------------------------------------- 2985 (1 row) @@ -52,19 +52,19 @@ WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 4 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 - count -------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + count +--------------------------------------------------------------------- 12000 (1 row) @@ -77,11 +77,11 @@ FROM WHERE l_partkey = c_nationkey; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -110,12 +110,12 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 - count -------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + count +--------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index 5e6842292..a1fc0c27d 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -47,7 +47,7 @@ CREATE TABLE repartition_udt_other ( udtcol test_udt, txtcol text ); --- Connect directly to a worker, create and drop the type, then +-- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port @@ -126,16 +126,16 @@ FUNCTION 1 test_udt_hash(test_udt); SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); @@ -153,12 +153,12 @@ INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- This query was intended to test "Query that should result in a repartition -- join on int column, and be empty." In order to remove broadcast logic, we --- manually make the query router plannable. +-- manually make the query router plannable. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk = 1; - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- (0 rows) -- Query that should result in a repartition join on UDT column. 
@@ -168,8 +168,8 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - QUERY PLAN --------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries @@ -186,8 +186,8 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- + pk | udtcol | txtcol | pk | udtcol | txtcol +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index a65177441..12de639f6 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -6,32 +6,32 @@ SET citus.next_shard_id TO 830000; \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_1_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_2_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -- Run query on master \c - - - :master_port @@ -39,7 +39,7 @@ SET citus.task_executor_type TO 
'task-tracker'; SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; - median | count ---------+------- + median | count +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index 5a724afa5..714e8353e 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -10,29 +10,29 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- test adding new node with no reference tables -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) -- verify node is added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -43,38 +43,38 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) -- test adding new node with a reference table which does not have any healthy placement SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) CREATE TABLE replicate_reference_table_unhealthy(column1 int); SELECT create_reference_table('replicate_reference_table_unhealthy'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -ERROR: could not find any healthy placement for shard 1370000 +ERROR: could not find any healthy placement for shard xxxxx -- verify node is not added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -85,17 +85,17 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport 
----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) DROP TABLE replicate_reference_table_unhealthy; -- test replicating a reference table when a new node added CREATE TABLE replicate_reference_table_valid(column1 int); SELECT create_reference_table('replicate_reference_table_valid'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_add_node @@ -106,8 +106,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -116,15 +116,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -136,8 +136,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -147,8 +147,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -161,8 +161,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -172,14 +172,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -191,8 +191,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -202,24 +202,24 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) DROP TABLE replicate_reference_table_valid; -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE replicate_reference_table_rollback(column1 int); SELECT create_reference_table('replicate_reference_table_rollback'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_add_node @@ -230,8 +230,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -240,16 +240,16 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -262,8 +262,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -272,8 +272,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -281,9 +281,9 @@ DROP TABLE replicate_reference_table_rollback; -- test replicating a reference table when a new node added in TRANSACTION + COMMIT CREATE TABLE replicate_reference_table_commit(column1 int); SELECT create_reference_table('replicate_reference_table_commit'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_add_node @@ -294,8 +294,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -304,16 +304,16 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -326,8 +326,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370003 | 1 | 0 | localhost | 57638 (1 row) @@ -337,24 +337,24 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) DROP TABLE replicate_reference_table_commit; -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE replicate_reference_table_reference_one(column1 int); SELECT create_reference_table('replicate_reference_table_reference_one'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_count TO 1; @@ -362,9 +362,9 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE replicate_reference_table_hash(column1 int); SELECT create_distributed_table('replicate_reference_table_hash', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables @@ -377,8 +377,8 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -387,8 +387,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -400,8 +400,8 @@ FROM WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; - logicalrelid | partmethod | ?column? | repmodel ------------------------------------------+------------+----------+---------- + logicalrelid | partmethod | ?column? 
| repmodel +--------------------------------------------------------------------- replicate_reference_table_reference_one | n | t | t replicate_reference_table_hash | h | f | c (2 rows) @@ -409,21 +409,21 @@ ORDER BY logicalrelid; BEGIN; SET LOCAL client_min_messages TO ERROR; SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT upgrade_to_reference_table('replicate_reference_table_hash'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('replicate_reference_table_reference_two'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -435,8 +435,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370004 | 1 | 0 | localhost | 57638 1370005 | 1 | 0 | localhost | 57638 1370006 | 1 | 0 | localhost | 57638 @@ -448,8 +448,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -459,10 +459,10 @@ FROM pg_dist_partition WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') -ORDER BY +ORDER BY logicalrelid; - logicalrelid | partmethod | ?column? | repmodel ------------------------------------------+------------+----------+---------- + logicalrelid | partmethod | ?column? 
| repmodel +--------------------------------------------------------------------- replicate_reference_table_reference_one | n | t | t replicate_reference_table_hash | n | t | t replicate_reference_table_reference_two | n | t | t @@ -473,16 +473,16 @@ DROP TABLE replicate_reference_table_hash; DROP TABLE replicate_reference_table_reference_two; -- test inserting a value then adding a new node in a transaction SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE replicate_reference_table_insert(column1 int); SELECT create_reference_table('replicate_reference_table_insert'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -494,9 +494,9 @@ DROP TABLE replicate_reference_table_insert; -- test COPY then adding a new node in a transaction CREATE TABLE replicate_reference_table_copy(column1 int); SELECT create_reference_table('replicate_reference_table_copy'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -508,9 +508,9 @@ DROP TABLE replicate_reference_table_copy; -- test executing DDL command then adding a new node in a transaction CREATE TABLE replicate_reference_table_ddl(column1 int); SELECT create_reference_table('replicate_reference_table_ddl'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -522,9 +522,9 @@ DROP TABLE replicate_reference_table_ddl; -- test DROP table after adding new node in a transaction CREATE TABLE replicate_reference_table_drop(column1 int); SELECT create_reference_table('replicate_reference_table_drop'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_add_node @@ -535,8 +535,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -545,16 +545,16 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_drop'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -568,28 +568,28 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) -- test adding a node while there is a reference table at another schema SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE SCHEMA replicate_reference_table_schema; CREATE TABLE replicate_reference_table_schema.table1(column1 int); SELECT create_reference_table('replicate_reference_table_schema.table1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- status before master_add_node @@ -600,8 +600,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT * @@ -610,15 +610,15 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "table1" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -630,8 +630,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370011 | 1 | 0 | localhost | 57638 (1 row) @@ -641,8 +641,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -650,9 +650,9 @@ DROP TABLE replicate_reference_table_schema.table1; DROP SCHEMA replicate_reference_table_schema CASCADE; -- test adding a node when there are foreign keys between reference tables SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref_table_1(id int primary key, v int); @@ -661,9 +661,9 @@ CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id)); SELECT create_reference_table('ref_table_1'), create_reference_table('ref_table_2'), create_reference_table('ref_table_3'); - create_reference_table | create_reference_table | create_reference_table -------------------------+------------------------+------------------------ - | | + create_reference_table | create_reference_table | create_reference_table +--------------------------------------------------------------------- + | | (1 row) -- status before master_add_node @@ -674,16 +674,16 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- (0 rows) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table_1" to the node localhost:57638 -NOTICE: Replicating reference table "ref_table_2" to the node localhost:57638 -NOTICE: Replicating reference table "ref_table_3" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "ref_table_1" to the node localhost:xxxxx +NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx +NOTICE: Replicating reference table "ref_table_3" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -695,8 +695,8 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370012 | 1 | 0 | localhost | 57638 1370013 | 1 | 0 | localhost | 57638 1370014 | 1 | 0 | localhost | 57638 @@ -704,8 +704,8 @@ ORDER BY shardid, nodeport; -- verify constraints have been created on the new node SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';'); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) @@ -713,21 +713,21 @@ SELECT run_command_on_workers('select count(*) from pg_constraint where contype= DROP TABLE ref_table_1, ref_table_2, ref_table_3; -- do some tests with inactive node SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) CREATE TABLE initially_not_replicated_reference_table (key int); SELECT create_reference_table('initially_not_replicated_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -737,24 +737,24 @@ SELECT FROM pg_dist_shard_placement WHERE - shardid IN (SELECT - shardid - FROM - pg_dist_shard - WHERE + shardid IN (SELECT + shardid + FROM + pg_dist_shard + WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) AND nodeport != :master_port ORDER BY 1,4,5; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370015 | 1 | 0 | localhost | 57637 (1 row) -- we should see the two shard placements after activation SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -763,24 +763,24 @@ SELECT FROM pg_dist_shard_placement WHERE - shardid IN (SELECT - shardid - FROM - pg_dist_shard - WHERE + shardid IN (SELECT + shardid + FROM + pg_dist_shard + WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) AND nodeport != :master_port ORDER BY 1,4,5; - shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- + shardid | shardstate | shardlength | nodename | nodeport +--------------------------------------------------------------------- 1370015 | 1 | 0 | localhost | 57637 1370015 | 1 | 0 | localhost | 57638 (2 rows) -- this should have no effect SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index 871ef8a68..2b470c5dc 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -33,40 +33,40 @@ CREATE TABLE authors_reference ( name varchar(20), id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard_hash (LIKE articles_hash); SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test when a table is distributed but no shards created yet SELECT count(*) from articles_hash; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT master_create_worker_shards('articles_hash', 2, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('authors_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- create a bunch of test data @@ -133,8 +133,8 @@ SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - id | author_id | title | word_count -----+-----------+-----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -143,8 +143,8 @@ SELECT title FROM articles_hash WHERE author_id = 10; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title ------------- + title +--------------------------------------------------------------------- aggrandize absentness andelee 
@@ -159,8 +159,8 @@ SELECT title, word_count FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title | word_count -------------+------------ + title | word_count +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -176,8 +176,8 @@ SELECT title, id FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - title | id ----------+---- + title | id +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -189,8 +189,8 @@ SELECT title, author_id FROM articles_hash ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -208,8 +208,8 @@ SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 agatized | 8 auriga | 7 @@ -231,8 +231,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 10 | 59955 8 | 55410 7 | 36756 @@ -248,8 +248,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 1 | 35894 (1 row) @@ -257,8 +257,8 @@ DETAIL: distribution column value: 1 -- not router-plannable due to <= and IN SELECT * FROM articles_hash WHERE author_id <= 1 ORDER BY id; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -269,8 +269,8 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM articles_hash WHERE author_id IN (1, 3) ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -287,8 +287,8 @@ SELECT * FROM articles_hash WHERE author_id IN (1, NULL) ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -302,8 +302,8 @@ SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router 
executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -317,8 +317,8 @@ SELECT title FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - title --------------- + title +--------------------------------------------------------------------- arsenous alamo arcading @@ -333,8 +333,8 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | id | title -----+-----------+----+-------------- + id | author_id | id | title +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -347,8 +347,8 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) -- CTE joins are supported because they are both planned recursively @@ -356,33 +356,33 @@ WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1) id_title AS (SELECT id, title from articles_hash WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: cannot run command which targets multiple shards -DEBUG: generating subplan 68_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 -DEBUG: generating subplan 68_2 for CTE id_title: SELECT id, title FROM public.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_2 for CTE id_title: SELECT id, title FROM public.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 2) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 -DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('68_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) DEBUG: 
Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) -- recursive CTEs are supported when filtered on partition column CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('company_employees', 4, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) INSERT INTO company_employees values(1, 1, 0); @@ -432,8 +432,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -474,15 +474,15 @@ WITH new_article AS ( ) SELECT * FROM new_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 82_1 for CTE new_article: INSERT INTO public.articles_hash (id, author_id, title, word_count) VALUES (1, 1, 'arsenous'::character varying, 9) RETURNING id, author_id, title, word_count +DEBUG: generating subplan XXX_1 for CTE new_article: INSERT INTO public.articles_hash (id, author_id, title, word_count) VALUES (1, 1, 'arsenous'::character varying, 9) RETURNING id, author_id, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 -DEBUG: Plan 82 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('82_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) new_article +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) new_article DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9 (1 row) @@ -491,14 +491,14 @@ WITH update_article AS ( ) SELECT * FROM update_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 84_1 for CTE update_article: UPDATE public.articles_hash SET word_count = 10 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 9)) RETURNING id, author_id, title, word_count +DEBUG: generating 
subplan XXX_1 for CTE update_article: UPDATE public.articles_hash SET word_count = 10 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 9)) RETURNING id, author_id, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 10 (1 row) @@ -507,14 +507,14 @@ WITH delete_article AS ( ) SELECT * FROM delete_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 86_1 for CTE delete_article: DELETE FROM public.articles_hash WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count +DEBUG: generating subplan XXX_1 for CTE delete_article: DELETE FROM public.articles_hash WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) delete_article +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) delete_article DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 10 (1 row) @@ -527,8 +527,6 @@ WITH new_article AS ( ) SELECT * FROM new_article; ERROR: WITH clause containing a data-modifying statement must be at the top level -LINE 2: WITH nested_cte AS ( - ^ -- Modifying statement in a CTE in subquery is also covered by PostgreSQL SELECT * FROM ( WITH new_article AS ( @@ -537,8 +535,6 @@ SELECT * FROM ( SELECT * FROM new_article ) AS subquery_cte; ERROR: WITH clause containing a 
data-modifying statement must be at the top level -LINE 2: WITH new_article AS ( - ^ -- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) @@ -548,8 +544,8 @@ SELECT ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable - id | subtitle | count -----+----------+------- + id | subtitle | count +--------------------------------------------------------------------- 1 | | 1 3 | | 1 11 | | 1 @@ -585,8 +581,8 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -597,8 +593,8 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 @@ -615,8 +611,8 @@ DEBUG: Plan is router executable SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | author_id | title | word_count | position -----+-----------+------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 12 | 2 | archiblast | 18185 | 3 42 | 2 | ausable | 15885 | 3 2 | 2 | abducing | 13642 | 3 @@ -631,8 +627,8 @@ ORDER BY articles_hash.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -646,12 +642,12 @@ FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE tes ORDER BY test.word_count DESC, articles_hash.id LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 94_1 for subquery SELECT id, word_count FROM public.articles_hash -DEBUG: Plan 94 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('94_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT 
intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5 DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -665,13 +661,13 @@ WHERE test.id = articles_hash.id and articles_hash.author_id = 1 ORDER BY articles_hash.id; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 96_1 for subquery SELECT id, word_count FROM public.articles_hash -DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -693,8 +689,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -708,8 +704,8 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -724,8 +720,8 @@ SELECT * WHERE author_id = 1 OR author_id = 18 ORDER BY 4 DESC, 3 DESC, 2 DESC, 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 @@ -740,8 +736,8 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 
1 - article_id | random_value -------------+-------------- + article_id | random_value +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -757,8 +753,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -773,8 +769,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -790,15 +786,15 @@ SELECT a.author_id as first_author, b.word_count as second_word_count LIMIT 3; DEBUG: Found no worker with all shard placements DEBUG: found no worker with all shard placements -DEBUG: generating subplan 105_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash +DEBUG: generating subplan XXX_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT a.author_id AS first_author, b.word_count AS second_word_count FROM public.articles_hash a, (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('105_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) b WHERE ((a.author_id OPERATOR(pg_catalog.=) 2) AND (a.author_id OPERATOR(pg_catalog.=) b.author_id)) LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a.author_id AS first_author, b.word_count AS second_word_count FROM public.articles_hash a, (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) b WHERE ((a.author_id OPERATOR(pg_catalog.=) 2) AND (a.author_id OPERATOR(pg_catalog.=) b.author_id)) LIMIT 3 DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- (0 rows) -- single shard select with limit is router plannable @@ -809,8 +805,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -825,8 +821,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count 
-----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -841,8 +837,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -856,8 +852,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -873,8 +869,8 @@ SELECT DISTINCT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -889,8 +885,8 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg --------------------- + avg +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -902,8 +898,8 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt --------+------+-------+----- + max | min | sum | cnt +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -915,8 +911,8 @@ SELECT max(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - max -------- + max +--------------------------------------------------------------------- 11814 (1 row) @@ -929,8 +925,8 @@ SELECT * FROM ( ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -948,8 +944,8 @@ UNION (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- a (1 row) @@ -958,8 +954,8 @@ INTERSECT (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- a (1 row) @@ -971,8 +967,8 @@ SELECT * FROM ( ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable - left ------- + left +--------------------------------------------------------------------- al ar at @@ -989,8 +985,8 @@ SET client_min_messages to 'NOTICE'; UNION (SELECT * FROM articles_hash WHERE author_id = 2) ORDER BY 1,2,3; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | asternal | 10480 @@ -1015,8 +1011,8 @@ SELECT * FROM ( (SELECT * FROM articles_hash WHERE author_id = 2)) uu ORDER BY 1, 2 LIMIT 5; - id | author_id | title | 
word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -1036,8 +1032,8 @@ SELECT * FROM articles_hash a, articles_hash b WHERE a.id = b.id AND a.author_id = 1 ORDER BY 1 DESC; - id | author_id | title | word_count | id | author_id | title | word_count -----+-----------+--------------+------------+----+-----------+--------------+------------ + id | author_id | title | word_count | id | author_id | title | word_count +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 | 41 | 1 | aznavour | 11814 31 | 1 | athwartships | 7271 | 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 | 21 | 1 | arcading | 5890 @@ -1052,8 +1048,8 @@ SELECT * FROM articles_hash WHERE author_id >= 1 AND author_id <= 3 ORDER BY 1,2,3,4; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | asternal | 10480 @@ -1082,8 +1078,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1096,8 +1092,8 @@ SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1112,8 +1108,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -1125,8 +1121,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -1134,8 +1130,8 @@ SELECT * FROM articles_hash WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1148,8 +1144,8 @@ SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 
9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1164,8 +1160,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1178,8 +1174,8 @@ SELECT * FROM articles_hash WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1192,8 +1188,8 @@ SELECT * FROM articles_hash WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1208,8 +1204,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1218,8 +1214,8 @@ SELECT * FROM articles_hash WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1234,8 +1230,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1250,8 +1246,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1263,8 +1259,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1276,8 +1272,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) 
@@ -1289,8 +1285,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1305,8 +1301,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1319,8 +1315,8 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 @@ -1335,8 +1331,8 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1350,8 +1346,8 @@ SELECT id, MIN(id) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | min -----+----- + id | min +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1365,8 +1361,8 @@ SELECT id, word_count, AVG(word_count) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count | avg -----+------------+----------------------- + id | word_count | avg +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1380,8 +1376,8 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - word_count | rank -------------+------ + word_count | rank +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1408,8 +1404,8 @@ SELECT * WHERE false; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1417,8 +1413,8 @@ SELECT * WHERE author_id = 1 and false; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1426,8 +1422,8 @@ SELECT * WHERE author_id = 1 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count 
-----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1435,8 +1431,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id and false; DEBUG: Creating router plan DEBUG: Plan is router executable - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1444,8 +1440,8 @@ SELECT * WHERE null; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- where false with immutable function returning false @@ -1454,8 +1450,8 @@ SELECT * WHERE a.author_id = 10 and int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1463,8 +1459,8 @@ SELECT * WHERE int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1473,8 +1469,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -1487,8 +1483,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- (0 rows) -- partition_column is null clause does not prune out any shards, @@ -1497,8 +1493,8 @@ SELECT * FROM articles_hash a WHERE a.author_id is null; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- partition_column equals to null clause prunes out all shards @@ -1508,8 +1504,8 @@ SELECT * WHERE a.author_id = null; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- stable function returning bool @@ -1518,8 +1514,8 @@ SELECT * WHERE date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count 
-----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1528,8 +1524,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- (0 rows) -- union/difference /intersection with where false @@ -1544,8 +1540,8 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1562,8 +1558,8 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1577,8 +1573,8 @@ INTERSECT DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- CTEs with where false @@ -1589,8 +1585,8 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 1 and 1=0) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), @@ -1598,8 +1594,8 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) \set VERBOSITY DEFAULT @@ -1617,8 +1613,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- (0 rows) WITH RECURSIVE hierarchy as ( @@ -1635,8 +1631,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- 1 | 1 | 0 | 1 (1 row) @@ 
-1654,8 +1650,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- (0 rows) -- window functions with where false @@ -1664,8 +1660,8 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) WHERE author_id = 1 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable - word_count | rank -------------+------ + word_count | rank +--------------------------------------------------------------------- (0 rows) -- function calls in WHERE clause with non-relational arguments @@ -1677,8 +1673,8 @@ SELECT author_id FROM articles_hash LIMIT 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 - author_id ------------ + author_id +--------------------------------------------------------------------- 1 (1 row) @@ -1691,8 +1687,8 @@ SELECT author_id FROM articles_hash LIMIT 1; DEBUG: Creating router plan DEBUG: Plan is router executable - author_id ------------ + author_id +--------------------------------------------------------------------- (0 rows) -- verify range partitioned tables can be used in router plannable queries @@ -1700,15 +1696,15 @@ DEBUG: Plan is router executable -- they are 'co-located' pairwise SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('authors_range', 'id', 'range'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_distributed_table('articles_range', 'author_id', 'range'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('authors_range') as shard_id \gset @@ -1731,23 +1727,23 @@ UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :s SELECT * FROM articles_range where author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles_range where author_id = 1 or author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- zero shard select query is router plannable SELECT * FROM articles_range where author_id = 1 and author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- single shard joins on range partitioned table are router plannable @@ -1755,8 +1751,8 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id 
-----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- zero shard join is router plannable @@ -1764,8 +1760,8 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 and au.id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- This query was intended to test "multi-shard join is not router plannable" @@ -1802,8 +1798,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- This query was intended to test "this is a bug, it is a single shard join @@ -1840,8 +1836,8 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) RESET citus.task_executor_type; @@ -1850,8 +1846,8 @@ SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id) WHERE ar.author_id = 1 and au.id < 10; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- join between hash and range partition tables are router plannable @@ -1863,8 +1859,8 @@ SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- not router plannable @@ -1903,8 +1899,8 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au. WHERE ar.author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- still hits a single shard and router plannable @@ -1912,25 +1908,25 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au. 
WHERE ar.author_id = 1 or ar.author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- it is not router plannable if hit multiple shards SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1 or ar.author_id = 15; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- + id | author_id | title | word_count | name | id +--------------------------------------------------------------------- (0 rows) -- following is a bug, function should have been -- evaluated at master before going to worker -- need to use a append distributed table here SELECT master_create_distributed_table('articles_append', 'author_id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.shard_replication_factor TO 1; @@ -2007,8 +2003,8 @@ SELECT * FROM articles_hash ORDER BY author_id, id LIMIT 5; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2048,8 +2044,8 @@ SELECT DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - c ---- + c +--------------------------------------------------------------------- 5 (1 row) @@ -2069,8 +2065,8 @@ SELECT author_id ORDER BY c; DEBUG: Router planner cannot handle multi-shard select queries - c ---- + c +--------------------------------------------------------------------- 4 5 5 @@ -2092,8 +2088,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2112,8 +2108,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2133,14 +2129,14 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH ALL test_cursor; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 @@ -2148,13 +2144,13 @@ FETCH ALL test_cursor; (4 rows) FETCH test_cursor; -- 
fetch one row after the last - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 (1 row) @@ -2189,8 +2185,8 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count | count --------+------- + count | count +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -2199,8 +2195,8 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1 or author_id = 2; DEBUG: Router planner cannot handle multi-shard select queries - count | count --------+------- + count | count +--------------------------------------------------------------------- 10 | 2 (1 row) @@ -2213,8 +2209,8 @@ EXECUTE author_1_articles; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2231,8 +2227,8 @@ EXECUTE author_articles(1); DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2261,8 +2257,8 @@ DETAIL: distribution column value: 1 CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -2289,8 +2285,8 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -2305,8 +2301,8 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_empty; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2318,8 +2314,8 @@ CREATE MATERIALIZED VIEW mv_articles_hash_data AS SELECT * FROM articles_hash WHERE author_id in (1,2); DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM mv_articles_hash_data ORDER BY 1, 2, 3, 4; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | 
alamo | 1347 @@ -2340,8 +2336,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -2360,8 +2356,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -2376,15 +2372,15 @@ SET client_min_messages to 'NOTICE'; SET citus.shard_replication_factor TO 2; CREATE TABLE failure_test (a int, b int); SELECT master_create_distributed_table('failure_test', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('failure_test', 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SET citus.enable_ddl_propagation TO off; @@ -2402,7 +2398,7 @@ GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; -- we will fail to connect to worker 2, since the user does not exist BEGIN; INSERT INTO failure_test VALUES (1, 1); -WARNING: connection error: localhost:57638 +WARNING: connection error: localhost:xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( @@ -2410,8 +2406,8 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 840017 | 1 | localhost | 57637 840017 | 3 | localhost | 57638 840018 | 1 | localhost | 57638 @@ -2420,7 +2416,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement ROLLBACK; INSERT INTO failure_test VALUES (2, 1); -WARNING: connection error: localhost:57638 +WARNING: connection error: localhost:xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( @@ -2428,8 +2424,8 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 840017 | 1 | localhost | 57637 840017 | 1 | localhost | 57638 840018 | 3 | localhost | 57638 diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out index ce9090fc0..d9e4f136c 100644 --- a/src/test/regress/expected/multi_router_planner_fast_path.out +++ b/src/test/regress/expected/multi_router_planner_fast_path.out @@ -36,16 +36,16 @@ CREATE TABLE authors_range ( name varchar(20), id bigint ); SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash', 'author_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + 
(1 row) CREATE TABLE authors_reference ( name varchar(20), id bigint ); SELECT create_reference_table('authors_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- create a bunch of test data @@ -70,8 +70,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - id | author_id | title | word_count -----+-----------+-----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -81,8 +81,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title ------------- + title +--------------------------------------------------------------------- aggrandize absentness andelee @@ -98,8 +98,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title | word_count -------------+------------ + title | word_count +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -116,8 +116,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - title | id ----------+---- + title | id +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -130,8 +130,8 @@ SELECT title, author_id FROM articles_hash ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -155,16 +155,16 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 1 | 35894 (1 row) -- fast path planner only support = operator SELECT * FROM articles_hash WHERE author_id <= 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -175,8 +175,8 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM articles_hash WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -195,8 +195,8 @@ SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -211,8 +211,8 @@ SELECT * FROM id_author, 
id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | id | title -----+-----------+----+-------------- + id | author_id | id | title +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -226,36 +226,36 @@ WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1) id_title AS (SELECT id, title from articles_hash WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: cannot run command which targets multiple shards -DEBUG: generating subplan 12_1 for CTE id_author: SELECT id, author_id FROM fast_path_router_select.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for CTE id_author: SELECT id, author_id FROM fast_path_router_select.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1) DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 -DEBUG: generating subplan 12_2 for CTE id_title: SELECT id, title FROM fast_path_router_select.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_2 for CTE id_title: SELECT id, title FROM fast_path_router_select.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 2) DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('12_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title -----+-----------+----+------- + id | author_id | id | title +--------------------------------------------------------------------- (0 rows) CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) -- do not print notices from workers since the order is not deterministic SET client_min_messages TO DEFAULT; SELECT master_create_worker_shards('company_employees', 4, 1); - 
master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO 'DEBUG2'; @@ -307,8 +307,8 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level -------------+-------------+------------+------- + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -319,14 +319,14 @@ WITH update_article AS ( ) SELECT * FROM update_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 24_1 for CTE update_article: UPDATE fast_path_router_select.articles_hash SET word_count = 10 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 9)) RETURNING id, author_id, title, word_count +DEBUG: generating subplan XXX_1 for CTE update_article: UPDATE fast_path_router_select.articles_hash SET word_count = 10 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 9)) RETURNING id, author_id, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) WITH delete_article AS ( @@ -334,14 +334,14 @@ WITH delete_article AS ( ) SELECT * FROM delete_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries -DEBUG: generating subplan 26_1 for CTE delete_article: DELETE FROM fast_path_router_select.articles_hash WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count +DEBUG: generating subplan XXX_1 for CTE delete_article: DELETE FROM fast_path_router_select.articles_hash WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) 
delete_article +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) delete_article DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- grouping sets are supported via fast-path @@ -355,8 +355,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | subtitle | count -----+----------+------- + id | subtitle | count +--------------------------------------------------------------------- 1 | | 1 11 | | 1 21 | | 1 @@ -383,8 +383,8 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -399,8 +399,8 @@ ORDER BY articles_hash.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -414,12 +414,12 @@ FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE tes ORDER BY test.word_count DESC, articles_hash.id LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 32_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5 DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | word_count -----+------------ + id | word_count 
+--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -433,13 +433,13 @@ WHERE test.id = articles_hash.id and articles_hash.author_id = 1 ORDER BY articles_hash.id; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 34_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash -DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -462,8 +462,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -478,8 +478,8 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -495,8 +495,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value -------------+-------------- + article_id | random_value +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -512,8 +512,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -528,8 +528,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | 
author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -545,8 +545,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -562,8 +562,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -578,8 +578,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -596,8 +596,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -613,8 +613,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg --------------------- + avg +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -627,8 +627,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt --------+------+-------+----- + max | min | sum | cnt +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -641,8 +641,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - max -------- + max +--------------------------------------------------------------------- 11814 (1 row) @@ -655,8 +655,8 @@ SELECT * FROM ( ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -674,8 +674,6 @@ SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1 -- top-level union queries are supported through recursive planning SET client_min_messages to 'NOTICE'; ERROR: syntax error at or near "SET" -LINE 3: SET client_min_messages to 'NOTICE'; - ^ -- unions in subqueries are not supported via fast-path planning SELECT * FROM ( (SELECT * FROM articles_hash WHERE author_id = 1) @@ -686,8 +684,8 @@ LIMIT 5; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count 
-----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -705,8 +703,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -728,8 +726,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 68719476736 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- cannot go through fast-path due to @@ -740,8 +738,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -755,8 +753,8 @@ SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -774,8 +772,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -786,8 +784,8 @@ SELECT * FROM articles_hash WHERE author_id = 1 and id = 1 or id = 41; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -802,8 +800,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -811,8 +809,8 @@ SELECT * FROM articles_hash WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -828,8 +826,8 @@ SELECT * DEBUG: Creating 
router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -843,8 +841,8 @@ SELECT * FROM articles_hash WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -858,8 +856,8 @@ SELECT * FROM articles_hash WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -876,8 +874,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -886,8 +884,8 @@ SELECT * FROM articles_hash WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -902,8 +900,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -917,29 +915,29 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 15 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM articles_hash WHERE (author_id = 15) OR (id = 1 AND word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) OR (author_id = 1 AND word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) AND (author_id = 1 OR word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -948,15 +946,15 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable 
DETAIL: distribution column value: 1 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) AND (title ilike 'a%' AND (word_count > 5 OR author_id = 2)); DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -965,8 +963,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -975,8 +973,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -988,8 +986,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1002,8 +1000,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1016,8 +1014,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1030,8 +1028,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1047,8 +1045,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1062,8 +1060,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 
7707 @@ -1079,8 +1077,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count -----------+----------+------------ + prev | title | word_count +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1095,8 +1093,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | min -----+----- + id | min +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1111,8 +1109,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count | avg -----+------------+----------------------- + id | word_count | avg +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1127,8 +1125,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - word_count | rank -------------+------ + word_count | rank +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1152,8 +1150,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - author_id | id | t1 | cnt_with_filter | cnt_with_filter_2 | case_cnt | coalesce ------------+----+------------------------------+-----------------+-------------------+------------------------+---------- + author_id | id | t1 | cnt_with_filter | cnt_with_filter_2 | case_cnt | coalesce +--------------------------------------------------------------------- 1 | 1 | 83.20028854345579490574 | 0 | 1 | | 0 1 | 11 | 629.20816629547141796586 | 1 | 1 | 44.0000000000000000 | 1 1 | 21 | 915.20501693381380745499 | 0 | 1 | 0.00000000000000000000 | 0 @@ -1167,8 +1165,8 @@ SELECT * WHERE false; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- fast-path with false @@ -1178,8 +1176,8 @@ SELECT * DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- fast-path with false @@ -1190,8 +1188,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1200,8 +1198,8 @@ SELECT * DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | 
word_count +--------------------------------------------------------------------- (0 rows) -- we cannot qualify dist_key = X operator Y via @@ -1212,8 +1210,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -1230,8 +1228,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- partition_column is null clause does not prune out any shards, @@ -1241,8 +1239,8 @@ SELECT * FROM articles_hash a WHERE a.author_id is null; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- partition_column equals to null clause prunes out all shards @@ -1253,8 +1251,8 @@ SELECT * WHERE a.author_id = null; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- union/difference /intersection with where false @@ -1269,8 +1267,8 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1287,8 +1285,8 @@ SELECT * FROM ( ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- window functions with where false @@ -1299,8 +1297,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - word_count | rank -------------+------ + word_count | rank +--------------------------------------------------------------------- (0 rows) -- create a dummy function to be used in filtering @@ -1362,8 +1360,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - c ---- + c +--------------------------------------------------------------------- 5 (1 row) @@ -1377,8 +1375,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | 
arcading | 5890 @@ -1398,8 +1396,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1421,14 +1419,14 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH ALL test_cursor; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 @@ -1436,13 +1434,13 @@ FETCH ALL test_cursor; (4 rows) FETCH test_cursor; -- fetch one row after the last - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 (1 row) @@ -1480,8 +1478,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count | count --------+------- + count | count +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -1495,8 +1493,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1505,8 +1503,8 @@ DETAIL: distribution column value: 1 (5 rows) EXECUTE author_1_articles; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1515,8 +1513,8 @@ EXECUTE author_1_articles; (5 rows) EXECUTE author_1_articles; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1525,8 +1523,8 @@ EXECUTE author_1_articles; (5 rows) EXECUTE author_1_articles; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1535,8 +1533,8 @@ EXECUTE author_1_articles; (5 rows) EXECUTE 
author_1_articles; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1545,8 +1543,8 @@ EXECUTE author_1_articles; (5 rows) EXECUTE author_1_articles; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1564,8 +1562,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1578,8 +1576,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1592,8 +1590,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1606,8 +1604,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1620,8 +1618,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1635,8 +1633,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1663,38 +1661,38 @@ SELECT author_articles_max_id(); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id 
+--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1713,8 +1711,8 @@ SELECT author_articles_max_id(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1722,8 +1720,8 @@ SELECT author_articles_max_id(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1731,8 +1729,8 @@ SELECT author_articles_max_id(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1740,8 +1738,8 @@ SELECT author_articles_max_id(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1749,8 +1747,8 @@ SELECT author_articles_max_id(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1759,8 +1757,8 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - author_articles_max_id ------------------------- + author_articles_max_id +--------------------------------------------------------------------- 41 (1 row) @@ -1779,8 +1777,8 @@ SELECT * FROM author_articles_id_word_count(); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1789,8 +1787,8 @@ DEBUG: Plan is router executable (5 rows) SELECT * FROM author_articles_id_word_count(); - id | word_count -----+------------ + id | 
word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1799,8 +1797,8 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1809,8 +1807,8 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1819,8 +1817,8 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1829,8 +1827,8 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1853,8 +1851,8 @@ SELECT * FROM author_articles_id_word_count(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1866,8 +1864,8 @@ SELECT * FROM author_articles_id_word_count(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1879,8 +1877,8 @@ SELECT * FROM author_articles_id_word_count(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1892,8 +1890,8 @@ SELECT * FROM author_articles_id_word_count(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1905,8 +1903,8 @@ SELECT * FROM author_articles_id_word_count(1); DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1919,8 +1917,8 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1989,8 +1987,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count -------- + count 
+--------------------------------------------------------------------- 0 (1 row) @@ -1999,8 +1997,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2009,8 +2007,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 3 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2019,8 +2017,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2029,8 +2027,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2040,8 +2038,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 6 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2052,8 +2050,8 @@ SELECT * FROM test_view; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2069,8 +2067,8 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_empty; - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2087,8 +2085,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -2108,8 +2106,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -2136,9 +2134,9 @@ CREATE TABLE collections_list_2 -- we don't need many shards SET citus.shard_count TO 2; SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO collections_list SELECT i % 10, now(), (i % 2) + 1, i*i FROM generate_series(0, 50)i; @@ -2148,8 +2146,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count 
+--------------------------------------------------------------------- 5 (1 row) @@ -2158,8 +2156,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -2168,8 +2166,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -2183,8 +2181,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -2193,8 +2191,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -2203,8 +2201,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count -------- + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 492e5855c..3a225cca9 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -20,28 +20,28 @@ CREATE TABLE test_schema_support.nation_append( n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('test_schema_support.nation_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1190000 (1 row) -- append table to shard SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support.nation_append; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -52,56 +52,56 @@ CREATE TABLE test_schema_support."nation._'append" ( n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('test_schema_support."nation._''append"'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 
1190001 (1 row) SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support."nation._'append"; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) -- test master_append_table_to_shard with schema with search_path is set SET search_path TO test_schema_support; SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM nation_append; - count -------- + count +--------------------------------------------------------------------- 12 (1 row) -- test with search_path is set and shard name contains special characters SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM "nation._'append"; - count -------- + count +--------------------------------------------------------------------- 12 (1 row) @@ -116,9 +116,9 @@ CREATE TABLE nation_append_search_path( n_comment varchar(152) ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy nation_append_search_path FROM STDIN with delimiter '|'; @@ -130,15 +130,15 @@ CREATE TABLE test_schema_support.nation_hash( n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- test cursors @@ -149,20 +149,20 @@ DECLARE test_cursor CURSOR FOR FROM test_schema_support.nation_append WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -175,20 +175,20 @@ DECLARE test_cursor CURSOR FOR FROM nation_append WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) @@ -198,9 +198,9 @@ SET search_path TO public; INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (6, 'FRANCE', 3); -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- - 6 | FRANCE | 3 | + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- + 6 | FRANCE | 3 | (1 row) -- test with search_path is set @@ -208,9 +208,9 @@ SET search_path TO test_schema_support; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- - 7 | GERMANY | 3 | + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- + 7 | GERMANY | 3 | (1 row) -- test UDFs with schemas @@ -254,8 +254,8 @@ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction ---------------- + dummyfunction +--------------------------------------------------------------------- 1 10 11 @@ -269,8 +269,8 @@ SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY -- UDF in public, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction ---------------- + dummyfunction +--------------------------------------------------------------------- 1 10 11 @@ -323,8 +323,8 @@ LANGUAGE 'plpgsql' IMMUTABLE; -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction2 ----------------- + dummyfunction2 +--------------------------------------------------------------------- 1 10 11 @@ -338,8 +338,8 @@ SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support. -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction2 ----------------- + dummyfunction2 +--------------------------------------------------------------------- 1 10 11 @@ -384,16 +384,16 @@ CREATE OPERATOR test_schema_support.=== ( \c - - - :master_port -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) -- test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -402,16 +402,16 @@ SET search_path TO public; UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM test_schema_support.nation_hash ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 1 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 2 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 2 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 2 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 5 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 1 | ven packages wake quickly. regu - 6 | FRANCE | 4 | - 7 | GERMANY | 4 | + 6 | FRANCE | 4 | + 7 | GERMANY | 4 | (8 rows) --test with search_path is set @@ -419,16 +419,16 @@ SET search_path TO test_schema_support; UPDATE nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM nation_hash ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 2 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 3 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 3 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 3 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 6 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 2 | ven packages wake quickly. 
regu - 6 | FRANCE | 5 | - 7 | GERMANY | 5 | + 6 | FRANCE | 5 | + 7 | GERMANY | 5 | (8 rows) --test COLLATION with schema @@ -443,28 +443,28 @@ CREATE TABLE test_schema_support.nation_hash_collation( n_comment varchar(152) ); SELECT master_get_table_ddl_events('test_schema_support.nation_hash_collation') ORDER BY 1; - master_get_table_ddl_events --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + master_get_table_ddl_events +--------------------------------------------------------------------- ALTER TABLE test_schema_support.nation_hash_collation OWNER TO postgres CREATE TABLE test_schema_support.nation_hash_collation (n_nationkey integer NOT NULL, n_name character(25) NOT NULL COLLATE test_schema_support.english, n_regionkey integer NOT NULL, n_comment character varying(152)) (2 rows) SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -474,8 +474,8 @@ SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; (6 rows) SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; - n_comment ------------------------------------------------------------------------------------------------------------- + n_comment +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. 
carefully final deposits detect slyly agai @@ -493,21 +493,21 @@ CREATE TABLE nation_hash_collation_search_path( n_comment varchar(152) ); SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ + n_nationkey | n_name | n_regionkey | n_comment +--------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold @@ -517,8 +517,8 @@ SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; - n_comment ------------------------------------------------------------------------------------------------------------- + n_comment +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai @@ -538,30 +538,30 @@ CREATE TABLE test_schema_support.nation_hash_composite_types( test_col test_schema_support.new_composite_type ); SELECT master_create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- insert some data to verify composite type queries \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; - n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- + n_nationkey | n_name | n_regionkey | n_comment | test_col +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) --test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; - n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- + n_nationkey | n_name | n_regionkey | n_comment | test_col +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -570,24 +570,24 @@ SET search_path TO public; ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :master_port @@ -596,22 +596,22 @@ NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :master_port @@ -620,24 +620,24 @@ SET search_path TO test_schema_support; ALTER TABLE nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | 
not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :master_port @@ -647,22 +647,22 @@ NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers --------------+------------------------+----------- + Column | Type | Modifiers +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :master_port @@ -673,16 +673,16 @@ CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1'::regclass; - Column | Type | Definition ---------+---------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; - Column | Type | Definition ---------+---------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -701,16 +701,16 @@ CREATE INDEX index1 ON nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE relid = 'test_schema_support.index1'::regclass; - Column | Type | Definition ---------+---------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; - Column | Type | Definition ---------+---------------+------------ + Column | Type | Definition +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -728,15 +728,15 @@ SET search_path TO public; -- 
mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; - shardstate | nodename | nodeport -------------+-----------+---------- + shardstate | nodename | nodeport +--------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) @@ -746,15 +746,15 @@ SET search_path TO test_schema_support; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; - shardstate | nodename | nodeport -------------+-----------+---------- + shardstate | nodename | nodeport +--------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) @@ -762,8 +762,8 @@ SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid -- test master_apply_delete_command with schemas SET search_path TO public; SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ; - master_apply_delete_command ------------------------------ + master_apply_delete_command +--------------------------------------------------------------------- 1 (1 row) @@ -775,8 +775,8 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen SET search_path TO test_schema_support; \copy nation_append FROM STDIN with delimiter '|'; SELECT master_apply_delete_command('DELETE FROM nation_append') ; - master_apply_delete_command ------------------------------ + master_apply_delete_command +--------------------------------------------------------------------- 1 (1 row) @@ -808,23 +808,23 @@ CREATE TABLE test_schema_support_join_2.nation_hash ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy 
test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; @@ -838,8 +838,8 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -853,8 +853,8 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -868,8 +868,8 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -883,8 +883,8 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -900,8 +900,8 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -915,8 +915,8 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -930,8 +930,8 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -946,8 +946,8 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 14 (1 row) @@ -961,8 +961,8 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 14 (1 row) @@ -976,8 +976,8 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; - count -------- + count +--------------------------------------------------------------------- 14 (1 row) @@ -995,31 +995,31 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
CONTEXT: SQL statement "CREATE USER "test-user"" PL/pgSQL function run_command_on_coordinator_and_workers(text) line 3 at EXECUTE - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- verify that owner of the created schema is test-user \c - - - :worker_1_port \dn schema_with_user List of schemas - Name | Owner -------------------+----------- + Name | Owner +--------------------------------------------------------------------- schema_with_user | test-user (1 row) @@ -1028,16 +1028,16 @@ SELECT create_reference_table('schema_with_user.test_table'); DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to table schema_with_user.test_table SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); - run_command_on_workers ----------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP OWNED") (localhost,57638,t,"DROP OWNED") (2 rows) SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); - run_command_on_coordinator_and_workers ----------------------------------------- - + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + (1 row) DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); @@ -1045,9 +1045,9 @@ DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- randomly insert data to evaluate below UDFs better @@ -1056,14 +1056,14 @@ INSERT INTO run_test_schema.test_table VALUES(7); INSERT INTO run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); - sum -------- + sum +--------------------------------------------------------------------- 49152 (1 row) SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); - sum -------- + sum +--------------------------------------------------------------------- 24576 (1 row) @@ -1077,15 +1077,15 @@ CREATE TABLE "CiTuS.TeeN"."TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); CREATE TABLE "CiTUS.TEEN2"."CAPITAL_TABLE"(i int, j int); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('"CiTUS.TEEN2"."CAPITAL_TABLE"', 'i'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- truncate tables with weird names @@ -1093,8 +1093,8 @@ INSERT INTO "CiTuS.TeeN"."TeeNTabLE.1!?!" VALUES(1, 1); INSERT INTO "CiTUS.TEEN2"."CAPITAL_TABLE" VALUES(0, 1); TRUNCATE "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE"; SELECT count(*) FROM "CiTUS.TEEN2"."CAPITAL_TABLE"; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -1106,8 +1106,8 @@ SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j -----+-----------+---+--- + id | TeNANt_Id | i | j +--------------------------------------------------------------------- 0 | 1 | 1 | 0 1 | 0 | 0 | 1 1 | 1 | 1 | 0 @@ -1123,8 +1123,8 @@ FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY "TeNANt_Id"; - id | TeNANt_Id | i | j -----+-----------+---+--- + id | TeNANt_Id | i | j +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1136,8 +1136,8 @@ FROM "CiTuS.TeeN"."TeeNTabLE.1!?!" join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j -----+-----------+---+--- + id | TeNANt_Id | i | j +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1153,8 +1153,8 @@ SELECT * FROM "cTE" join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j -----+-----------+---+--- + id | TeNANt_Id | i | j +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1172,8 +1172,8 @@ join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j -----+-----------+---+--- + id | TeNANt_Id | i | j +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 diff --git a/src/test/regress/expected/multi_select_distinct.out b/src/test/regress/expected/multi_select_distinct.out index 840e2895a..433c420dc 100644 --- a/src/test/regress/expected/multi_select_distinct.out +++ b/src/test/regress/expected/multi_select_distinct.out @@ -6,13 +6,13 @@ ANALYZE lineitem_hash_part; -- function calls are supported SELECT DISTINCT l_orderkey, now() FROM lineitem_hash_part LIMIT 0; - l_orderkey | now -------------+----- + l_orderkey | now +--------------------------------------------------------------------- (0 rows) SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER BY 1 DESC LIMIT 3; - l_partkey | ?column? ------------+---------- + l_partkey | ?column? 
+--------------------------------------------------------------------- 199973 | 1 199946 | 1 199943 | 1 @@ -20,8 +20,8 @@ SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER -- const expressions are supported SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; - l_orderkey | ?column? -------------+---------- + l_orderkey | ?column? +--------------------------------------------------------------------- 1 | 2 2 | 2 3 | 2 @@ -31,8 +31,8 @@ SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; -- non const expressions are also supported SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 LIMIT 5; - l_orderkey | ?column? -------------+---------- + l_orderkey | ?column? +--------------------------------------------------------------------- 1 | 2133 1 | 15636 1 | 24028 @@ -42,8 +42,8 @@ SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 -- column expressions are supported SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part ORDER BY 2 , 1 LIMIT 5; - l_orderkey | ?column? -------------+---------------- + l_orderkey | ?column? +--------------------------------------------------------------------- 32 | COLLECT CODAIR 39 | COLLECT CODAIR 66 | COLLECT CODAIR @@ -53,8 +53,8 @@ SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part -- function calls with const input are supported SELECT DISTINCT l_orderkey, strpos('AIR', 'A') FROM lineitem_hash_part ORDER BY 1,2 LIMIT 5; - l_orderkey | strpos -------------+-------- + l_orderkey | strpos +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -68,8 +68,8 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') WHERE strpos(l_shipmode, 'I') > 1 ORDER BY 2, 1 LIMIT 5; - l_orderkey | strpos -------------+-------- + l_orderkey | strpos +--------------------------------------------------------------------- 1 | 2 3 | 2 5 | 2 @@ -79,8 +79,8 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') -- row types are supported SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY 1 LIMIT 5; - pair ------------ + pair +--------------------------------------------------------------------- (1,2132) (1,15635) (1,24027) @@ -92,20 +92,20 @@ SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY -- verify counts match with respect to count(distinct) CREATE TEMP TABLE temp_orderkeys AS SELECT DISTINCT l_orderkey FROM lineitem_hash_part; SELECT COUNT(*) FROM temp_orderkeys; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) SELECT COUNT(DISTINCT l_orderkey) FROM lineitem_hash_part; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_partkey < 5000 order by 1; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 1 3 32 @@ -128,8 +128,8 @@ SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_ -- distinct on non-partition column SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE l_orderkey > 5 and l_orderkey < 20 order by 1; - l_partkey ------------ + l_partkey +--------------------------------------------------------------------- 79251 94780 139636 @@ -141,15 +141,15 @@ SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE 
l_orderkey > 5 and l_ord (8 rows) SELECT DISTINCT l_shipmode FROM lineitem_hash_part ORDER BY 1 DESC; - l_shipmode ------------- - TRUCK - SHIP - REG AIR - RAIL - MAIL - FOB - AIR + l_shipmode +--------------------------------------------------------------------- + TRUCK + SHIP + REG AIR + RAIL + MAIL + FOB + AIR (7 rows) -- distinct with multiple columns @@ -157,8 +157,8 @@ SELECT DISTINCT l_orderkey, o_orderdate FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE l_orderkey < 10 ORDER BY l_orderkey; - l_orderkey | o_orderdate -------------+------------- + l_orderkey | o_orderdate +--------------------------------------------------------------------- 1 | 01-02-1996 2 | 12-01-1996 3 | 10-14-1993 @@ -176,8 +176,8 @@ SELECT DISTINCT l_orderkey, count(*) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 7 | 7 68 | 7 129 | 7 @@ -207,8 +207,8 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - QUERY PLAN ----------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count DESC, remote_scan.l_orderkey -> HashAggregate @@ -217,7 +217,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_orderkey Filter: (count(*) > 5) @@ -234,8 +234,8 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count DESC, remote_scan.l_orderkey -> Unique @@ -245,7 +245,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_orderkey Filter: (count(*) > 5) @@ -260,8 +260,8 @@ SELECT DISTINCT count(*) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - count -------- + count +--------------------------------------------------------------------- 1 2 3 @@ -276,8 +276,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) -> HashAggregate @@ -288,7 +288,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -302,8 +302,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) -> Unique @@ 
-317,7 +317,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -331,8 +331,8 @@ SELECT DISTINCT l_suppkey, count(*) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - l_suppkey | count ------------+------- + l_suppkey | count +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -352,8 +352,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey @@ -365,7 +365,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -380,8 +380,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey @@ -396,7 +396,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -410,8 +410,8 @@ SELECT DISTINCT l_suppkey, avg(l_partkey) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - l_suppkey | avg ------------+------------------------ + l_suppkey | avg +--------------------------------------------------------------------- 1 | 190000.000000000000 2 | 172450.000000000000 3 | 112469.000000000000 @@ -432,8 +432,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) @@ -445,7 +445,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -460,8 +460,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) @@ -476,7 +476,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: 
host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -489,8 +489,8 @@ SELECT DISTINCT ON (l_suppkey) avg(l_partkey) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - avg ------------------------- + avg +--------------------------------------------------------------------- 190000.000000000000 172450.000000000000 112469.000000000000 @@ -511,8 +511,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -523,7 +523,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -538,8 +538,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -552,7 +552,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -565,8 +565,8 @@ SELECT DISTINCT avg(ceil(l_partkey / 2)) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - avg ------ + avg +--------------------------------------------------------------------- 9 39 74 @@ -586,8 +586,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision)) @@ -599,7 +599,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -614,8 +614,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision)) @@ -630,7 +630,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -643,8 +643,8 @@ SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - dis ------ + dis 
+--------------------------------------------------------------------- 2 3 4 @@ -664,8 +664,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint))) @@ -677,7 +677,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -692,8 +692,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint))) @@ -708,7 +708,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -722,13 +722,13 @@ SELECT DISTINCT * GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------------- + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment +--------------------------------------------------------------------- 1 | 2132 | 4633 | 4 | 28.00 | 28955.64 | 0.09 | 0.06 | N | O | 04-21-1996 | 03-30-1996 | 05-16-1996 | NONE | AIR | lites. fluffily even de 1 | 15635 | 638 | 6 | 32.00 | 49620.16 | 0.07 | 0.02 | N | O | 01-30-1996 | 02-07-1996 | 02-03-1996 | DELIVER IN PERSON | MAIL | arefully slyly ex 1 | 24027 | 1534 | 5 | 24.00 | 22824.48 | 0.10 | 0.04 | N | O | 03-30-1996 | 03-14-1996 | 04-01-1996 | NONE | FOB | pending foxes. slyly re 1 | 63700 | 3701 | 3 | 8.00 | 13309.60 | 0.10 | 0.02 | N | O | 01-29-1996 | 03-05-1996 | 01-31-1996 | TAKE BACK RETURN | REG AIR | riously. 
regular, express dep - 1 | 67310 | 7311 | 2 | 36.00 | 45983.16 | 0.09 | 0.06 | N | O | 04-12-1996 | 02-28-1996 | 04-20-1996 | TAKE BACK RETURN | MAIL | ly final dependencies: slyly bold + 1 | 67310 | 7311 | 2 | 36.00 | 45983.16 | 0.09 | 0.06 | N | O | 04-12-1996 | 02-28-1996 | 04-20-1996 | TAKE BACK RETURN | MAIL | ly final dependencies: slyly bold 1 | 155190 | 7706 | 1 | 17.00 | 21168.23 | 0.04 | 0.02 | N | O | 03-13-1996 | 02-12-1996 | 03-22-1996 | DELIVER IN PERSON | TRUCK | egular courts above the 2 | 106170 | 1191 | 1 | 38.00 | 44694.46 | 0.00 | 0.05 | N | O | 01-28-1997 | 01-14-1997 | 02-02-1997 | TAKE BACK RETURN | RAIL | ven requests. deposits breach a 3 | 4297 | 1798 | 1 | 45.00 | 54058.05 | 0.06 | 0.00 | R | F | 02-02-1994 | 01-04-1994 | 02-23-1994 | NONE | AIR | ongside of the furiously brave acco @@ -744,8 +744,8 @@ EXPLAIN (COSTS FALSE) GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -755,7 +755,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Unique -> Group @@ -774,8 +774,8 @@ EXPLAIN (COSTS FALSE) GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -786,7 +786,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Unique -> Group @@ -802,8 +802,8 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - count | count --------+------- + count | count +--------------------------------------------------------------------- 1 | 1 2 | 1 2 | 2 @@ -838,8 +838,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 -> HashAggregate @@ -848,7 +848,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: l_orderkey -> Sort 
@@ -864,8 +864,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 -> Unique @@ -875,7 +875,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: l_orderkey -> Sort @@ -889,8 +889,8 @@ SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - count -------- + count +--------------------------------------------------------------------- 0 1 2 @@ -904,8 +904,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) -> HashAggregate @@ -916,7 +916,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -929,8 +929,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) -> Unique @@ -944,7 +944,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_suppkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -958,8 +958,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 2 LIMIT 15; - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.array_length @@ -969,7 +969,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: l_orderkey -> Sort @@ -985,8 +985,8 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 2 LIMIT 15; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.array_length @@ -997,7 +997,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> GroupAggregate Group Key: l_orderkey -> Sort @@ -1013,8 +1013,8 @@ SELECT DISTINCT l_partkey, count(*) GROUP BY 1 HAVING 
count(*) > 2 ORDER BY 1; - l_partkey | count ------------+------- + l_partkey | count +--------------------------------------------------------------------- 1051 | 3 1927 | 3 6983 | 3 @@ -1035,8 +1035,8 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 2 ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey -> HashAggregate @@ -1046,7 +1046,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_partkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -1059,8 +1059,8 @@ SELECT DISTINCT l_partkey, avg(l_linenumber) GROUP BY 1 HAVING avg(l_linenumber) > 2 ORDER BY 1; - l_partkey | avg ------------+-------------------- + l_partkey | avg +--------------------------------------------------------------------- 18 | 7.0000000000000000 79 | 6.0000000000000000 149 | 4.5000000000000000 @@ -1083,8 +1083,8 @@ SELECT DISTINCT l_partkey, l_suppkey FROM lineitem_hash_part WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; - l_partkey | l_suppkey ------------+----------- + l_partkey | l_suppkey +--------------------------------------------------------------------- 2132 | 4633 4297 | 1798 37531 | 35 @@ -1107,8 +1107,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; - QUERY PLAN ------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey, remote_scan.l_suppkey -> HashAggregate @@ -1117,7 +1117,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Unique -> Sort Sort Key: l_partkey, l_suppkey @@ -1130,8 +1130,8 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey, l_suppkey FROM lineitem_hash_part WHERE l_orderkey < 35 ORDER BY 1; - l_orderkey | l_partkey | l_suppkey -------------+-----------+----------- + l_orderkey | l_partkey | l_suppkey +--------------------------------------------------------------------- 1 | 155190 | 7706 2 | 106170 | 1191 3 | 4297 | 1798 @@ -1149,8 +1149,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part WHERE l_orderkey < 35 ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.l_orderkey @@ -1158,7 +1158,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Unique -> Sort Sort Key: l_orderkey @@ -1174,8 +1174,8 @@ SELECT DISTINCT ON (l_partkey) l_partkey, l_orderkey FROM lineitem_hash_part ORDER BY 1,2 LIMIT 20; - l_partkey | l_orderkey ------------+------------ + l_partkey | l_orderkey +--------------------------------------------------------------------- 18 | 12005 79 | 5121 91 | 2883 @@ -1203,8 +1203,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part ORDER BY 1,2 LIMIT 20; - QUERY PLAN 
----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -1213,7 +1213,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Unique -> Sort @@ -1227,8 +1227,8 @@ SELECT DISTINCT ON (o_custkey) o_custkey, l_orderkey FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15 ORDER BY 1,2; - o_custkey | l_orderkey ------------+------------ + o_custkey | l_orderkey +--------------------------------------------------------------------- 1 | 9154 2 | 10563 4 | 320 @@ -1248,8 +1248,8 @@ EXPLAIN (COSTS FALSE) WHERE o_custkey < 15 ORDER BY 1,2; $Q$); - coordinator_plan ------------------------------------------------------------------ + coordinator_plan +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey, remote_scan.l_orderkey @@ -1265,8 +1265,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15; $Q$); - coordinator_plan ------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey @@ -1279,8 +1279,8 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 20 ORDER BY 1,2,3; - o_custkey | l_orderkey | l_linenumber | l_partkey ------------+------------+--------------+----------- + o_custkey | l_orderkey | l_linenumber | l_partkey +--------------------------------------------------------------------- 1 | 9154 | 1 | 86513 1 | 14656 | 1 | 59539 2 | 10563 | 1 | 147459 @@ -1326,8 +1326,8 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 20; $Q$); - coordinator_plan ------------------------------------------------------------------ + coordinator_plan +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey, remote_scan.l_orderkey @@ -1340,8 +1340,8 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15 ORDER BY 1,2,3 DESC; - o_custkey | l_orderkey | l_linenumber | l_partkey ------------+------------+--------------+----------- + o_custkey | l_orderkey | l_linenumber | l_partkey +--------------------------------------------------------------------- 1 | 9154 | 7 | 173448 1 | 14656 | 1 | 59539 2 | 10563 | 4 | 110741 @@ -1380,8 +1380,8 @@ SELECT DISTINCT l_orderkey, l_partkey ) q ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey -------------+----------- + l_orderkey | l_partkey +--------------------------------------------------------------------- 1 | 2132 1 | 15635 1 | 24027 @@ -1402,8 +1402,8 @@ EXPLAIN (COSTS FALSE) ) q ORDER BY 1,2 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -1413,7 +1413,7 @@ EXPLAIN (COSTS FALSE) Task Count: 
4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: l_orderkey, l_partkey @@ -1430,8 +1430,8 @@ SELECT DISTINCT l_orderkey, cnt ) q ORDER BY 1,2 LIMIT 10; - l_orderkey | cnt -------------+----- + l_orderkey | cnt +--------------------------------------------------------------------- 1 | 6 2 | 1 3 | 6 @@ -1453,8 +1453,8 @@ EXPLAIN (COSTS FALSE) ) q ORDER BY 1,2 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.cnt @@ -1464,7 +1464,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: lineitem_hash_part.l_orderkey, (count(*)) @@ -1485,8 +1485,8 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey WHERE r > 1 ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey -------------+----------- + l_orderkey | l_partkey +--------------------------------------------------------------------- 1 | 2132 2 | 106170 3 | 4297 @@ -1508,8 +1508,8 @@ EXPLAIN (COSTS FALSE) WHERE r > 1 ORDER BY 1,2 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -1518,7 +1518,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Unique -> Sort @@ -1537,8 +1537,8 @@ SELECT DISTINCT ON (l_partkey) l_orderkey, l_partkey WHERE r > 1 ORDER BY 2,1 LIMIT 10; - l_orderkey | l_partkey -------------+----------- + l_orderkey | l_partkey +--------------------------------------------------------------------- 12005 | 18 5121 | 79 2883 | 91 @@ -1560,8 +1560,8 @@ EXPLAIN (COSTS FALSE) WHERE r > 1 ORDER BY 2,1 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -1570,7 +1570,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Unique -> Sort diff --git a/src/test/regress/expected/multi_select_for_update.out b/src/test/regress/expected/multi_select_for_update.out index f357a27b6..5267e7503 100644 --- a/src/test/regress/expected/multi_select_for_update.out +++ b/src/test/regress/expected/multi_select_for_update.out @@ -2,67 +2,67 @@ -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. --- To find the relation size and total relation size citus_relation_size and +-- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. 
SET citus.next_shard_id TO 1460000; SET citus.shard_replication_factor to 1; CREATE TABLE test_table_1_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_1_rf1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_1_rf1 values(1,2),(2,3),(3,4),(15,16); CREATE TABLE test_table_2_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_2_rf1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_2_rf1 values(1,2),(2,3),(3,4); CREATE TABLE ref_table(id int, val_1 int); SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ref_table values(1,2),(3,4),(5,6); CREATE TABLE ref_table_2(id int, val_1 int); SELECT create_reference_table('ref_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ref_table_2 values(3,4),(5,6),(8,9); SET citus.shard_replication_factor to 2; CREATE TABLE test_table_3_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_3_rf2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_3_rf2 values(1,2),(2,3),(3,4); CREATE TABLE test_table_4_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_4_rf2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table_4_rf2 values(1,2),(2,3),(3,4); -- Hash tables with RF = 1 is supported for router planner queries SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; - id | val_1 | id | val_1 -----+-------+----+------- + id | val_1 | id | val_1 +--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -71,8 +71,8 @@ SELECT * FROM test_table_1_rf1 as tt1 WHERE tt1.id = 1 OR tt1.id = 15 ORDER BY 1 FOR UPDATE; - id | val_1 -----+------- + id | val_1 +--------------------------------------------------------------------- 1 | 2 15 | 16 (2 rows) @@ -109,8 +109,8 @@ SELECT * FROM WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; - id | val_1 | id | val_1 -----+-------+----+------- + id | val_1 | id | val_1 +--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -120,8 +120,8 @@ SELECT * FROM WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; - id | val_1 | id | val_1 -----+-------+----+------- + id | val_1 | id | val_1 +--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -131,8 +131,8 @@ SELECT * FROM ORDER BY 1 FOR UPDATE OF rt1; - id | val_1 | id | val_1 -----+-------+----+------- + id | val_1 | id | val_1 +--------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 (2 rows) @@ -144,18 +144,18 @@ SELECT * FROM FOR UPDATE OF rt1 NOWAIT; - id | val_1 | id | val_1 -----+-------+----+------- + id | val_1 | id | val_1 
+--------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 (2 rows) -- queries with CTEs are supported -WITH first_value AS ( +WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; - val_1 -------- + val_1 +--------------------------------------------------------------------- 2 (1 row) @@ -164,15 +164,15 @@ WITH update_table AS ( UPDATE test_table_1_rf1 SET val_1 = 10 WHERE id = 1 RETURNING * ) SELECT * FROM update_table FOR UPDATE; - id | val_1 -----+------- + id | val_1 +--------------------------------------------------------------------- 1 | 10 (1 row) -- Subqueries also supported SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; - id | val_1 -----+------- + id | val_1 +--------------------------------------------------------------------- 1 | 10 (1 row) diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index 792fb80ac..61eb0921e 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -8,9 +8,9 @@ CREATE TABLE multi_shard_modify_test ( t_name varchar(25) not null, t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv'); @@ -20,23 +20,23 @@ BEGIN; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) ROLLBACK; SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 27 (1 row) @@ -53,8 +53,8 @@ ERROR: functions used in the WHERE clause of modification queries on distribute SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -62,8 +62,8 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -75,9 +75,9 @@ HINT: Run the command directly ERROR: relation temp_nations is not distributed -- commands with a USING clause are unsupported SELECT create_distributed_table('temp_nations', 'name', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); @@ -88,8 +88,8 @@ ERROR: complex joins are only supported when all distributed tables are co-loca SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -101,22 +101,22 @@ ERROR: cannot perform an INSERT without a partition column value -- Check that we can successfully delete from multiple shards with 1PC SET citus.multi_shard_commit_protocol TO '1pc'; SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 25 (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 23 (1 row) @@ -125,14 +125,14 @@ SET citus.multi_shard_commit_protocol TO '2pc'; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 21 (1 row) @@ -145,8 +145,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 15 - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -155,8 +155,8 @@ SET client_min_messages TO NOTICE; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' '); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -164,14 +164,14 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; - t_name --------- + t_name +--------------------------------------------------------------------- warsaw (1 row) @@ -179,14 +179,14 @@ SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_name --------- + t_name +--------------------------------------------------------------------- ??? ??? ??? @@ -197,14 +197,14 @@ SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_value ---------- + t_value +--------------------------------------------------------------------- 296 296 296 @@ -215,14 +215,14 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_name | t_value -----------+--------- + t_name | t_value +--------------------------------------------------------------------- somename | 333 somename | 333 somename | 333 @@ -233,14 +233,14 @@ SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0; - t_name ------------ + t_name +--------------------------------------------------------------------- nice city nice city (2 rows) @@ -254,8 +254,8 @@ ERROR: modifying the partition value of rows is not allowed SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' '); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -263,8 +263,8 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -277,14 +277,14 @@ ERROR: cannot perform an INSERT without a partition column value SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value ---------- + t_value +--------------------------------------------------------------------- 10 (1 row) @@ -292,14 +292,14 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value ---------- + t_value +--------------------------------------------------------------------- 47 (1 row) @@ -308,8 +308,8 @@ CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -317,14 +317,14 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value ---------- + t_value +--------------------------------------------------------------------- 78 (1 row) @@ -332,8 +332,8 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) @@ -346,8 +346,8 @@ ERROR: functions used in UPDATE queries on distributed tables must not be VOLAT SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards -------------------------------- + master_modify_multiple_shards +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out index 7f6715cb1..f00fe3570 100644 --- a/src/test/regress/expected/multi_shard_update_delete.out +++ b/src/test/regress/expected/multi_shard_update_delete.out @@ -7,74 +7,74 @@ SET citus.shard_replication_factor to 1; SET citus.multi_shard_modify_mode to 'parallel'; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY users_test_table FROM STDIN DELIMITER AS ','; CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY events_test_table FROM STDIN DELIMITER AS ','; CREATE TABLE events_reference_copy_table (like events_test_table); SELECT create_reference_table('events_reference_copy_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO events_reference_copy_table SELECT * FROM events_test_table; CREATE TABLE users_reference_copy_table (like users_test_table); SELECT create_reference_table('users_reference_copy_table'); - create_reference_table ------------------------- - + 
create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO users_reference_copy_table SELECT * FROM users_test_table; -- Run multi shard updates and deletes without transaction on hash distributed tables UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -83,8 +83,8 @@ BEGIN; UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) @@ -93,8 +93,8 @@ BEGIN; UPDATE users_test_table SET value_3 = 1; ROLLBACK; SELECT SUM(value_3) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) @@ -106,8 +106,8 @@ SET citus.multi_shard_modify_mode to sequential; UPDATE users_test_table SET value_3 = 1; END; SELECT SUM(value_3) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 16 (1 row) @@ -119,8 +119,6 @@ UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_copy_table; ERROR: relation "users_copy_table" does not exist -LINE 1: SELECT SUM(value_3) FROM users_copy_table; - ^ -- Run multiple multi shard updates (with parallel executor) SET citus.multi_shard_modify_mode to 'parallel'; UPDATE users_test_table SET value_3 = 5; @@ -129,23 +127,23 @@ UPDATE users_test_table SET value_3 = 2; UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) -- Check with kind of constraints UPDATE users_test_table SET value_3 = 1 WHERE user_id = 3 or true; SELECT COUNT(*), SUM(value_3) FROM users_test_table; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 16 | 16 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id = 20 and false; SELECT COUNT(*), SUM(value_3) FROM users_test_table; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 16 | 16 (1 row) @@ -158,8 +156,8 @@ EXECUTE foo_plan(7,35); EXECUTE foo_plan(9,45); 
EXECUTE foo_plan(0,0); SELECT SUM(value_1), SUM(value_3) FROM users_test_table; - sum | sum ------+----- + sum | sum +--------------------------------------------------------------------- 0 | 0 (1 row) @@ -176,39 +174,39 @@ INSERT INTO append_stage_table_2 VALUES(9,2); INSERT INTO append_stage_table_2 VALUES(10,4); CREATE TABLE test_append_table(id int, col_2 int); SELECT create_distributed_table('test_append_table','id','append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('test_append_table'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1440010 (1 row) SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) SELECT master_create_empty_shard('test_append_table') AS new_shard_id; - new_shard_id --------------- + new_shard_id +--------------------------------------------------------------------- 1440011 (1 row) SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', 'localhost', :master_port); - master_append_table_to_shard ------------------------------- + master_append_table_to_shard +--------------------------------------------------------------------- 0.00533333 (1 row) UPDATE test_append_table SET col_2 = 5; SELECT * FROM test_append_table ORDER BY 1 DESC, 2 DESC; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 10 | 5 9 | 5 8 | 5 @@ -229,16 +227,16 @@ CREATE TABLE tt1_1120 partition of tt1 for VALUES FROM (11) to (20); INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17), (2,12); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE tt1 SET col_2 = 13; DELETE FROM tt1 WHERE id = 1 or id = 3 or id = 5; SELECT * FROM tt1 ORDER BY 1 DESC, 2 DESC; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 8 | 13 6 | 13 2 | 13 @@ -255,8 +253,8 @@ UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20; UPDATE tt1 SET col_2 = 7 WHERE col_2 < 10 and col_2 > 5; COMMIT; SELECT * FROM tt1 ORDER BY id; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 2 | 12 4 | 7 6 | 12 @@ -273,17 +271,17 @@ DELETE FROM tt1_510; DELETE FROM tt1_1120; COMMIT; SELECT * FROM tt1 ORDER BY id; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- (0 rows) DROP TABLE tt1; -- Update and copy in the same transaction CREATE TABLE tt2(id int, col_2 int); SELECT create_distributed_table('tt2','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -291,8 +289,8 @@ BEGIN; UPDATE tt2 SET col_2 = 1; COMMIT; SELECT * FROM tt2 ORDER BY id; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -302,8 +300,8 @@ SELECT * FROM tt2 ORDER BY id; -- Test returning with both type of executors UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 1 | 5 2 | 5 3 | 5 @@ -313,8 +311,8 @@ UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; SET citus.multi_shard_modify_mode to sequential; UPDATE tt2 SET col_2 = 3 RETURNING id, col_2; - id | col_2 -----+------- + id | col_2 +--------------------------------------------------------------------- 1 | 3 2 | 3 3 | 3 @@ -329,9 +327,9 @@ SET citus.multi_shard_modify_mode to DEFAULT; SET citus.shard_count to 6; CREATE TABLE events_test_table_2 (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table_2', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY events_test_table_2 FROM STDIN DELIMITER AS ','; @@ -343,9 +341,9 @@ INSERT INTO test_table_1 VALUES(2, '2015-02-01 08:31:16', 7); INSERT INTO test_table_1 VALUES(3, '2111-01-12 08:35:19', 9); SELECT create_distributed_table('test_table_1', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- We can pushdown query if there is partition key equality @@ -369,8 +367,8 @@ WHERE now() > (SELECT max(date_col) WHERE test_table_1.id = events_test_table_2.user_id GROUP BY id) RETURNING *; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 1 | 5 | 7 | 7 1 | 20 | 12 | 25 1 | 60 | 17 | 17 @@ -395,8 +393,8 @@ WHERE user_id IN (SELECT user_id UNION SELECT user_id FROM events_test_table) returning value_3; - value_3 ---------- + value_3 +--------------------------------------------------------------------- 0 0 0 @@ -417,8 +415,8 @@ WHERE user_id IN (SELECT user_id UNION ALL SELECT user_id FROM events_test_table) returning value_3; - value_3 ---------- + value_3 +--------------------------------------------------------------------- 0 0 0 @@ -493,8 +491,8 @@ INSERT INTO users_test_table SELECT * FROM events_test_table WHERE events_test_table.user_id = 1 OR events_test_table.user_id = 5; SELECT SUM(value_2) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 169 (1 row) @@ -503,8 +501,8 @@ SET value_2 = 1 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; SELECT SUM(value_2) FROM users_test_table; - sum ------ + sum +--------------------------------------------------------------------- 97 (1 row) @@ -513,9 +511,9 @@ COMMIT; CREATE SCHEMA sec_schema; CREATE TABLE sec_schema.tt1(id int, value_1 int); SELECT create_distributed_table('sec_schema.tt1','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO sec_schema.tt1 values(1,1),(2,2),(7,7),(9,9); @@ -525,8 +523,8 @@ WHERE id < (SELECT max(value_2) FROM events_test_table_2 WHERE sec_schema.tt1.id = events_test_table_2.user_id GROUP BY user_id) RETURNING *; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 7 | 11 9 | 11 (2 rows) @@ -563,8 +561,8 @@ SET col_3 = 6 WHERE date_col IN (SELECT now()); -- Test with prepared statements SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -579,8 +577,8 @@ EXECUTE foo_plan_2(7,35); EXECUTE foo_plan_2(9,45); EXECUTE foo_plan_2(0,0); SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -615,13 +613,13 @@ UPDATE users_test_table as utt SET value_1 = 3 WHERE value_2 > (SELECT value_3 FROM events_test_table as ett WHERE utt.user_id = ett.user_id); ERROR: more than one row returned by a subquery used as an expression -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- We can not pushdown a query if the target relation is reference table UPDATE users_reference_copy_table SET value_2 = 5 FROM events_test_table WHERE users_reference_copy_table.user_id = events_test_table.user_id; -ERROR: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables +ERROR: only reference tables may be 
queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables -- We cannot push down it if the query has outer join and using UPDATE events_test_table SET value_2 = users_test_table.user_id @@ -754,8 +752,8 @@ ERROR: more than one row returned by a subquery used as an expression BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM users_test_table ORDER BY user_id; FETCH test_cursor; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 1 | 2 | 5 | 0 (1 row) @@ -764,8 +762,8 @@ ERROR: cannot run DML queries with cursors ROLLBACK; -- Stable functions are supported SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 -----+------------------------------+------- + id | date_col | col_3 +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 7 1 | Sat Apr 05 08:32:12 2014 PDT | 5 @@ -773,8 +771,8 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; UPDATE test_table_1 SET col_3 = 3 WHERE date_col < now(); SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 -----+------------------------------+------- + id | date_col | col_3 +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 3 1 | Sat Apr 05 08:32:12 2014 PDT | 3 @@ -782,8 +780,8 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; DELETE FROM test_table_1 WHERE date_col < current_timestamp; SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 -----+------------------------------+------- + id | date_col | col_3 +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 (1 row) @@ -795,9 +793,9 @@ INSERT INTO test_table_2 VALUES(2, random()); INSERT INTO test_table_2 VALUES(3, random()); SELECT create_distributed_table('test_table_2', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE test_table_2 SET double_col = random(); @@ -805,42 +803,42 @@ ERROR: functions used in UPDATE queries on distributed tables must not be VOLAT DROP TABLE test_table_2; -- Run multi shard updates and deletes without transaction on reference tables SELECT COUNT(*) FROM users_reference_copy_table; - count -------- + count +--------------------------------------------------------------------- 15 (1 row) UPDATE users_reference_copy_table SET value_1 = 1; SELECT SUM(value_1) FROM users_reference_copy_table; - sum ------ + sum +--------------------------------------------------------------------- 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_reference_copy_table SET value_2 = value_2 + 1 WHERE user_id = 3 or user_id = 5; SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_reference_copy_table SET value_3 = 0 WHERE user_id <> 3; SELECT SUM(value_3) FROM users_reference_copy_table WHERE user_id <> 3; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) DELETE FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -849,50 +847,50 @@ DROP TABLE users_test_table; SET citus.shard_replication_factor to 2; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY users_test_table FROM STDIN DELIMITER AS ','; -- Run multi shard updates and deletes without transaction on hash distributed tables UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum --------+----- + count | sum +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; - sum ------ + sum +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE user_id = 3 
or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out index 52536160e..fb999d87c 100644 --- a/src/test/regress/expected/multi_simple_queries.out +++ b/src/test/regress/expected/multi_simple_queries.out @@ -1,7 +1,7 @@ SET citus.next_shard_id TO 850000; -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; -- =================================================================== -- test end-to-end query functionality @@ -17,27 +17,27 @@ CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('articles', 2, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- create a bunch of test data @@ -100,15 +100,15 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; - id | author_id | title | word_count -----+-----------+-----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; - title ------------- + title +--------------------------------------------------------------------- aggrandize absentness andelee @@ -120,8 +120,8 @@ SELECT title FROM articles WHERE author_id = 10; SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; - title | word_count -------------+------------ + title | word_count +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -134,8 +134,8 @@ SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; - title | id ----------+---- + title | id +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -144,8 +144,8 @@ SELECT title, id FROM articles SELECT title, author_id FROM articles WHERE author_id = 7 OR 
author_id = 8 ORDER BY author_id ASC, id; - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -164,8 +164,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 2 | 61782 10 | 59955 8 | 55410 @@ -175,8 +175,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 10 | 10 | aggrandize | 17277 12 | 2 | archiblast | 18185 @@ -192,8 +192,8 @@ ORDER BY 1,2,3; -- queries using CTEs are supported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles ORDER BY 1 LIMIT 5; - title ------------ + title +--------------------------------------------------------------------- abducing abeyance abhorring @@ -203,8 +203,8 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; - id | author_id | title | word_count | position -----+-----------+------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 30 | 10 | andelee | 6363 | 3 @@ -214,16 +214,16 @@ SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DE -- subqueries are supported in WHERE clause in Citus even if the relations are not distributed SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a'); - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) -- subqueries are supported in FROM clause SELECT articles.id,test.word_count FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id ORDER BY articles.id; - id | word_count -----+------------ + id | word_count +--------------------------------------------------------------------- 1 | 9572 2 | 13642 3 | 10480 @@ -305,8 +305,8 @@ HINT: If you want to discard the results of a SELECT, use PERFORM instead. 
CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; - count -------- + count +--------------------------------------------------------------------- 50 (1 row) @@ -329,8 +329,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 4 | 66325 2 | 61782 10 | 59955 @@ -342,8 +342,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 2 4 6 @@ -355,8 +355,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 2 4 (2 rows) @@ -365,8 +365,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 1 2 3 @@ -380,19 +380,19 @@ SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 1 2 8 (3 rows) -SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders +SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; - o_orderstatus | count | avg ----------------+-------+--------------------- + o_orderstatus | count | avg +--------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 (2 rows) @@ -402,8 +402,8 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; - o_orderstatus | sum | avg ----------------+------+-------------------- + o_orderstatus | sum | avg +--------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 (2 rows) @@ -419,8 +419,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -434,8 +434,8 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -448,8 +448,8 @@ SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count 
+--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -464,8 +464,8 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value -------------+-------------- + article_id | random_value +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -482,8 +482,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -498,8 +498,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -513,8 +513,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -530,8 +530,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -550,8 +550,8 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg --------------------- + avg +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -566,7 +566,7 @@ ERROR: unsupported aggregate function invalid DROP AGGREGATE invalid(int); SET client_min_messages to 'DEBUG2'; -- max, min, sum, count is somehow implemented --- differently in distributed planning +-- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles @@ -574,8 +574,8 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt --------+------+-------+----- + max | min | sum | cnt +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -626,8 +626,8 @@ SELECT count(*) FROM ( xmax IS NOT NULL ) x; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 50 (1 row) @@ -636,24 +636,24 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE 
BERNOULLI (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -665,8 +665,8 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (100) WHERE author_id = 1 ORDER BY DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -681,8 +681,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -690,8 +690,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -699,8 +699,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -713,8 +713,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 diff --git a/src/test/regress/expected/multi_simple_queries_0.out b/src/test/regress/expected/multi_simple_queries_0.out index 6ad020ef4..134074c8d 100644 --- a/src/test/regress/expected/multi_simple_queries_0.out +++ b/src/test/regress/expected/multi_simple_queries_0.out @@ -1,7 +1,7 @@ SET citus.next_shard_id TO 850000; -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. 
--- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We've bunch of other tests that triggers fast-path-router SET citus.enable_fast_path_router_planner TO false; -- =================================================================== -- test end-to-end query functionality @@ -17,27 +17,27 @@ CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('articles', 2, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- create a bunch of test data @@ -100,15 +100,15 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; - id | author_id | title | word_count -----+-----------+-----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; - title ------------- + title +--------------------------------------------------------------------- aggrandize absentness andelee @@ -120,8 +120,8 @@ SELECT title FROM articles WHERE author_id = 10; SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; - title | word_count -------------+------------ + title | word_count +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -134,8 +134,8 @@ SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; - title | id ----------+---- + title | id +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -144,8 +144,8 @@ SELECT title, id FROM articles SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; - title | author_id --------------+----------- + title | author_id +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -164,8 +164,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 2 | 61782 10 | 59955 8 | 55410 @@ -175,8 +175,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles SELECT * 
FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; - id | author_id | title | word_count -----+-----------+------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 10 | 10 | aggrandize | 17277 12 | 2 | archiblast | 18185 @@ -192,8 +192,8 @@ ORDER BY 1,2,3; -- queries using CTEs are supported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles ORDER BY 1 LIMIT 5; - title ------------ + title +--------------------------------------------------------------------- abducing abeyance abhorring @@ -203,8 +203,8 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; - id | author_id | title | word_count | position -----+-----------+------------+------------+---------- + id | author_id | title | word_count | position +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 30 | 10 | andelee | 6363 | 3 @@ -249,8 +249,8 @@ HINT: If you want to discard the results of a SELECT, use PERFORM instead. CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; - count -------- + count +--------------------------------------------------------------------- 50 (1 row) @@ -273,8 +273,8 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; - author_id | corpus_size ------------+------------- + author_id | corpus_size +--------------------------------------------------------------------- 4 | 66325 2 | 61782 10 | 59955 @@ -286,8 +286,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 2 4 6 @@ -299,8 +299,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 2 4 (2 rows) @@ -309,8 +309,8 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 1 2 3 @@ -324,19 +324,19 @@ SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; - author_id ------------ + author_id +--------------------------------------------------------------------- 1 2 8 (3 rows) -SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders +SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; - o_orderstatus | count | avg ----------------+-------+--------------------- + o_orderstatus | count | avg +--------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 (2 rows) @@ -346,8 +346,8 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY 
o_orderstatus; - o_orderstatus | sum | avg ----------------+------+-------------------- + o_orderstatus | sum | avg +--------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 (2 rows) @@ -363,8 +363,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -378,8 +378,8 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -392,8 +392,8 @@ SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -408,8 +408,8 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value -------------+-------------- + article_id | random_value +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -426,8 +426,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -442,8 +442,8 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count ---------------+------------------- + first_author | second_word_count +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -457,8 +457,8 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+----------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -474,8 +474,8 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id ----- + id +--------------------------------------------------------------------- 1 11 21 @@ -494,8 +494,8 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg --------------------- + avg +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -510,7 +510,7 @@ ERROR: unsupported aggregate function invalid DROP 
AGGREGATE invalid(int); SET client_min_messages to 'DEBUG2'; -- max, min, sum, count is somehow implemented --- differently in distributed planning +-- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles @@ -518,8 +518,8 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt --------+------+-------+----- + max | min | sum | cnt +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -570,8 +570,8 @@ SELECT count(*) FROM ( xmax IS NOT NULL ) x; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 50 (1 row) @@ -580,24 +580,24 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -609,8 +609,8 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (100) WHERE author_id = 1 ORDER BY DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -625,8 +625,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -634,8 +634,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+-------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -643,8 +643,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan 
is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -657,8 +657,8 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count -----+-----------+--------------+------------ + id | author_id | title | word_count +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 diff --git a/src/test/regress/expected/multi_single_relation_subquery.out b/src/test/regress/expected/multi_single_relation_subquery.out index 6dec98669..aa097a015 100644 --- a/src/test/regress/expected/multi_single_relation_subquery.out +++ b/src/test/regress/expected/multi_single_relation_subquery.out @@ -27,8 +27,8 @@ order by total desc, number_sum desc limit 10; - number_sum | total | avg_count -------------+-------+-------------------- + number_sum | total | avg_count +--------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 @@ -65,8 +65,8 @@ order by total desc, number_sum desc limit 10; - number_sum | total | avg_count -------------+-------+-------------------- + number_sum | total | avg_count +--------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 @@ -97,8 +97,8 @@ group by order by avg_count desc, suppkey_bin DESC limit 20; - suppkey_bin | avg_count --------------+-------------------- + suppkey_bin | avg_count +--------------------------------------------------------------------- 95 | 1.4851485148514851 90 | 1.4761904761904762 52 | 1.4680851063829787 @@ -150,8 +150,8 @@ group by total order by total; - total | total_avg_count --------+-------------------- + total | total_avg_count +--------------------------------------------------------------------- 1 | 4.8000000000000000 6 | 3.0000000000000000 10 | 3.5000000000000000 @@ -174,8 +174,8 @@ from group by (l_orderkey/4)::int, l_suppkey ) as distributed_table; - avg ------------------------- + avg +--------------------------------------------------------------------- 1.00083402835696413678 (1 row) @@ -196,7 +196,7 @@ from limit 100) as distributed_table group by l_suppkey - ORDER BY 2 DESC, 1 DESC + ORDER BY 2 DESC, 1 DESC LIMIT 5; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with limit are not supported yet @@ -224,8 +224,8 @@ from lineitem group by l_partkey) as distributed_table; - avg ------------------------- + avg +--------------------------------------------------------------------- 1.02907126318497555956 (1 row) @@ -238,10 +238,10 @@ from lineitem group by l_partkey - having + having count(distinct l_shipdate) >= 2) as distributed_table; - avg --------------------- + avg +--------------------------------------------------------------------- 2.0335365853658537 (1 row) @@ -261,8 +261,8 @@ SELECT max(l_suppkey) FROM GROUP BY l_suppkey) z ) y; - max ------- + max +--------------------------------------------------------------------- 9999 (1 row) diff --git a/src/test/regress/expected/multi_size_queries.out 
b/src/test/regress/expected/multi_size_queries.out index e94b29af0..3e4961e1e 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -2,7 +2,7 @@ -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. --- To find the relation size and total relation size citus_relation_size and +-- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. SET citus.next_shard_id TO 1390000; -- Tests with invalid relation IDs @@ -31,20 +31,20 @@ ERROR: cannot calculate the size because replication factor is greater than 1 VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. SELECT citus_table_size('customer_copy_hash'); - citus_table_size ------------------- + citus_table_size +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); - citus_relation_size ---------------------- + citus_relation_size +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 1597440 (1 row) @@ -52,8 +52,8 @@ SELECT citus_total_relation_size('customer_copy_hash'); SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), citus_table_size('supplier'); - citus_table_size | citus_table_size | citus_table_size -------------------+------------------+------------------ + citus_table_size | citus_table_size | citus_table_size +--------------------------------------------------------------------- 548864 | 548864 | 401408 (1 row) @@ -61,60 +61,60 @@ CREATE INDEX index_1 on customer_copy_hash(c_custkey); VACUUM (FULL) customer_copy_hash; -- Tests on distributed table with index. 
SELECT citus_table_size('customer_copy_hash'); - citus_table_size ------------------- + citus_table_size +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); - citus_relation_size ---------------------- + citus_relation_size +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 2646016 (1 row) -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); - citus_table_size ------------------- + citus_table_size +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); - citus_relation_size ---------------------- + citus_relation_size +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 376832 (1 row) CREATE INDEX index_2 on supplier(s_suppkey); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); - citus_table_size ------------------- + citus_table_size +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); - citus_relation_size ---------------------- + citus_relation_size +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 458752 (1 row) @@ -125,43 +125,43 @@ select citus_table_size('supplier'); ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications END; show citus.node_conninfo; - citus.node_conninfo ---------------------- + citus.node_conninfo +--------------------------------------------------------------------- sslmode=require (1 row) ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require'; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) -- make sure that any invalidation to the connection info -- wouldn't prevent future commands to fail SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 2646016 (1 row) SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size ---------------------------- + citus_total_relation_size +--------------------------------------------------------------------- 2646016 (1 row) -- reset back to the original node_conninfo ALTER SYSTEM RESET citus.node_conninfo; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_sql_function.out 
b/src/test/regress/expected/multi_sql_function.out index dc4c2547f..4cd18c81c 100644 --- a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -37,26 +37,26 @@ SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run plain SQL functions SELECT sql_test_no_1(); - sql_test_no_1 ---------------- + sql_test_no_1 +--------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); - sql_test_no_2 ---------------- + sql_test_no_2 +--------------------------------------------------------------------- 12000 (1 row) SELECT sql_test_no_3(); - sql_test_no_3 ---------------- + sql_test_no_3 +--------------------------------------------------------------------- 1956 (1 row) SELECT sql_test_no_4(); - sql_test_no_4 ---------------- + sql_test_no_4 +--------------------------------------------------------------------- 7806 (1 row) @@ -65,14 +65,14 @@ SELECT sql_test_no_4(); RESET citus.task_executor_type; -- now, run plain SQL functions SELECT sql_test_no_1(); - sql_test_no_1 ---------------- + sql_test_no_1 +--------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); - sql_test_no_2 ---------------- + sql_test_no_2 +--------------------------------------------------------------------- 12000 (1 row) @@ -83,9 +83,9 @@ CREATE TABLE temp_table ( ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('temp_table','key','hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ @@ -93,39 +93,39 @@ CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql -------------------------- - + no_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ @@ -133,57 +133,57 @@ CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_insert_sql(10); - non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert_sql(20); - 
non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert_sql(30); - non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert_sql(40); - non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert_sql(50); - non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_insert_sql(60); - non_partition_parameter_insert_sql ------------------------------------- - + non_partition_parameter_insert_sql +--------------------------------------------------------------------- + (1 row) -- check inserted values SELECT * FROM temp_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (12 rows) -- check updates @@ -192,57 +192,57 @@ CREATE FUNCTION non_partition_parameter_update_sql(int, int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_update_sql(10, 12); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update_sql(20, 22); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update_sql(30, 32); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update_sql(40, 42); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update_sql(50, 52); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_update_sql(60, 62); - non_partition_parameter_update_sql ------------------------------------- - + non_partition_parameter_update_sql +--------------------------------------------------------------------- + (1 row) -- check after updates SELECT * FROM temp_table ORDER BY key, value; - key | value ------+------- + key | value +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (12 rows) -- check deletes @@ -251,59 +251,59 @@ CREATE FUNCTION non_partition_parameter_delete_sql(int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 
times to trigger prepared statement usage SELECT non_partition_parameter_delete_sql(12); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete_sql(22); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete_sql(32); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete_sql(42); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete_sql(52); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) SELECT non_partition_parameter_delete_sql(62); - non_partition_parameter_delete_sql ------------------------------------- - + non_partition_parameter_delete_sql +--------------------------------------------------------------------- + (1 row) -- check after deletes SELECT * FROM temp_table ORDER BY key, value; - key | value ------+------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + key | value +--------------------------------------------------------------------- + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- test running parameterized SQL function CREATE TABLE test_parameterized_sql(id integer, org_id integer); select create_distributed_table('test_parameterized_sql','org_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION test_parameterized_sql_function(org_id_val integer) @@ -337,9 +337,9 @@ CONTEXT: SQL function "test_parameterized_sql_function_in_subquery_where" state -- don't go over 2PC if they are not part of a bigger transaction. CREATE TABLE table_with_unique_constraint (a int UNIQUE); SELECT create_distributed_table('table_with_unique_constraint', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO table_with_unique_constraint VALUES (1), (2), (3); @@ -351,11 +351,11 @@ $$ LANGUAGE SQL; SELECT insert_twice(); ERROR: duplicate key value violates unique constraint "table_with_unique_constraint_a_key_1230009" DETAIL: Key (a)=(4) already exists. 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL function "insert_twice" statement 2 SELECT * FROM table_with_unique_constraint ORDER BY a; - a ---- + a +--------------------------------------------------------------------- 1 2 3 diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out index 08c35700d..c80d1597f 100644 --- a/src/test/regress/expected/multi_subquery.out +++ b/src/test/regress/expected/multi_subquery.out @@ -17,8 +17,8 @@ FROM l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; - avg -------------------------- + avg +--------------------------------------------------------------------- 142158.8766934673366834 (1 row) @@ -42,10 +42,10 @@ FROM lineitem_subquery GROUP BY l_suppkey) AS order_counts; -DEBUG: generating subplan 2_1 for subquery SELECT l_suppkey, count(*) AS order_count FROM public.lineitem_subquery GROUP BY l_suppkey -DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT avg(order_count) AS avg FROM (SELECT intermediate_result.l_suppkey, intermediate_result.order_count FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(l_suppkey integer, order_count bigint)) order_counts - avg --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT l_suppkey, count(*) AS order_count FROM public.lineitem_subquery GROUP BY l_suppkey +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(order_count) AS avg FROM (SELECT intermediate_result.l_suppkey, intermediate_result.order_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_suppkey integer, order_count bigint)) order_counts + avg +--------------------------------------------------------------------- 1.7199369356456930 (1 row) @@ -68,8 +68,8 @@ RESET client_min_messages; SELECT count(*) FROM ( SELECT l_orderkey FROM lineitem_subquery JOIN (SELECT random()::int r) sub ON (l_orderkey = r) WHERE r > 10 ) b; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -82,15 +82,15 @@ SELECT count(*) FROM ) b; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 7_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery +DEBUG: generating subplan XXX_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 7_2 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION ALL SELECT (1)::bigint AS int8 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('7_2'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION ALL SELECT (1)::bigint AS int8 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b DEBUG: Creating router plan 
DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 12001 (1 row) @@ -102,17 +102,17 @@ SELECT count(*) FROM ) b; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 10_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery +DEBUG: generating subplan XXX_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 10_2 for subquery SELECT l_partkey FROM public.lineitem_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT l_partkey FROM public.lineitem_subquery DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 10_3 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION SELECT intermediate_result.l_partkey FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer) -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION SELECT intermediate_result.l_partkey FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 14496 (1 row) @@ -123,8 +123,8 @@ SELECT count(*) FROM (SELECT l_orderkey FROM lineitem_subquery) ) b; DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) @@ -150,8 +150,8 @@ FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; - avg ------------------------- + avg +--------------------------------------------------------------------- 17470.0940725222668915 (1 row) @@ -192,8 +192,8 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 39 39 @@ -218,8 +218,8 @@ WHERE (o_orderkey < l_quantity + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 39 39 @@ -243,8 +243,8 @@ WHERE (o_orderkey < l_quantity + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 39 39 @@ -278,8 +278,8 @@ JOIN ON (l_orderkey::int8 = o_orderkey::int8) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 14947 14947 14946 @@ 
-314,8 +314,8 @@ WHERE (o_orderkey::int8 < l_quantity::int8 + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 39 39 @@ -339,8 +339,8 @@ WHERE (o_orderkey::int4 < l_quantity::int8 + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 39 39 @@ -396,8 +396,8 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -411,8 +411,8 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id > t2.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 6 | 5 | 5 | 3 5 | 5 | 5 | 3 4 | 5 | 5 | 3 @@ -433,11 +433,11 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id = (CASE WHEN t2.user_id > 3 THEN 3 ELSE t2.user_id END) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- - 6 | | | - 5 | | | - 4 | | | + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- + 6 | | | + 5 | | | + 4 | | | 3 | 5 | 5 | 3 2 | 4 | 4 | 5 (5 rows) @@ -450,8 +450,8 @@ SELECT DISTINCT ON (t1.user_id) t1.user_id, t2.value_1, t2.value_2, t2.value_3 LEFT JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -478,8 +478,8 @@ FROM ( ) lo ORDER BY 1, 2, 3 LIMIT 5; - user_id | value_1 | event_type ----------+---------+------------ + user_id | value_1 | event_type +--------------------------------------------------------------------- 1 | 1 | 0 1 | 1 | 0 1 | 1 | 1 @@ -493,8 +493,8 @@ FROM events_table t1 JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- + user_id | value_1 | value_2 | value_3 +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -513,8 +513,8 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey ------------- + l_orderkey +--------------------------------------------------------------------- 39 38 37 @@ -536,8 +536,8 @@ JOIN ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); - count -------- + count +--------------------------------------------------------------------- 13 (1 row) @@ -552,8 +552,8 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_quantity DESC LIMIT 10; - l_quantity ------------- + l_quantity +--------------------------------------------------------------------- 50.00 49.00 46.00 @@ -577,8 +577,8 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_quantity DESC LIMIT 10; - l_quantity ------------- + l_quantity 
+--------------------------------------------------------------------- 50.00 49.00 46.00 @@ -600,8 +600,8 @@ JOIN ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); - count -------- + count +--------------------------------------------------------------------- 25 (1 row) @@ -616,8 +616,8 @@ FROM ( GROUP BY l_orderkey ) z; - count -------- + count +--------------------------------------------------------------------- 7 (1 row) @@ -668,8 +668,8 @@ ORDER BY total_order_count DESC, o_custkey ASC LIMIT 10; - o_custkey | total_order_count ------------+------------------- + o_custkey | total_order_count +--------------------------------------------------------------------- 1462 | 9 619 | 8 643 | 8 @@ -698,8 +698,8 @@ FROM WHERE unit_price > 1000 AND unit_price < 10000; - avg ------------------------ + avg +--------------------------------------------------------------------- 4968.4946466804019323 (1 row) @@ -717,8 +717,8 @@ SELECT count(*) FROM ) a WHERE l_orderkey = 1 ) b; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -730,8 +730,8 @@ SELECT count(*) FROM ) a WHERE l_orderkey = 1 ) b; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -748,8 +748,8 @@ SELECT max(l_orderkey) FROM l_orderkey ) z ) y; - max -------- + max +--------------------------------------------------------------------- 14947 (1 row) @@ -769,8 +769,8 @@ FROM WHERE user_id = 3 GROUP BY user_id) AS bar WHERE foo.user_id = bar.user_id ) AS baz; - user_id | counter | user_id | counter ----------+---------+---------+--------- + user_id | counter | user_id | counter +--------------------------------------------------------------------- (0 rows) -- Subqueries filter by different users, one of which overlaps @@ -792,8 +792,8 @@ FROM WHERE foo.user_id = bar.user_id ) AS baz ORDER BY 1,2 LIMIT 5; - user_id | counter | user_id | counter ----------+---------+---------+--------- + user_id | counter | user_id | counter +--------------------------------------------------------------------- 2 | 57 | 2 | 57 (1 row) @@ -826,9 +826,9 @@ CREATE TABLE subquery_pruning_varchar_test_table ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- temporarily disable router executor to test pruning behaviour of subquery pushdown @@ -838,16 +838,16 @@ SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a) AS foo; DEBUG: Router planner not enabled. - count -------- + count +--------------------------------------------------------------------- (0 rows) SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a) AS foo; DEBUG: Router planner not enabled. 
- count -------- + count +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO NOTICE; @@ -879,8 +879,8 @@ SELECT * FROM GROUP BY a_inner) AS foo; - a ---- + a +--------------------------------------------------------------------- (0 rows) DROP TABLE subquery_pruning_varchar_test_table; @@ -910,8 +910,8 @@ FROM GROUP BY tenant_id, user_id) AS subquery; - event_average --------------------- + event_average +--------------------------------------------------------------------- 3.6666666666666667 (1 row) @@ -979,8 +979,8 @@ GROUP BY hasdone ORDER BY event_average DESC; - event_average | hasdone ---------------------+--------------------- + event_average | hasdone +--------------------------------------------------------------------- 4.0000000000000000 | Has not done paying 2.5000000000000000 | Has done paying (2 rows) @@ -1055,8 +1055,8 @@ GROUP BY count_pay ORDER BY count_pay; - event_average | count_pay ---------------------+----------- + event_average | count_pay +--------------------------------------------------------------------- 3.0000000000000000 | 0 (1 row) @@ -1112,8 +1112,8 @@ ORDER BY user_lastseen DESC LIMIT 10; - tenant_id | user_id | user_lastseen | event_array ------------+---------+---------------+---------------------------- + tenant_id | user_id | user_lastseen | event_array +--------------------------------------------------------------------- 1 | 1003 | 1472807315 | {click,click,click,submit} 1 | 1002 | 1472807215 | {click,click,submit,pay} 1 | 1001 | 1472807115 | {click,submit,pay} diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index 4b9fdf483..3a4b779d1 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -9,9 +9,9 @@ -- by non-router code-paths. Thus, this flag should NOT be used in production. Otherwise, the actual -- router queries would fail. 
SET citus.enable_router_execution TO FALSE; ------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table @@ -27,17 +27,17 @@ FROM ( GROUP BY user_id ) q ORDER BY 2 DESC, 1; - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 3 | 187 2 | 180 1 | 28 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- This has multiple subqueries joinin at the top level ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT @@ -75,8 +75,8 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event ----------+-----+--------+---------------- + user_id | sum | length | hasdone_event +--------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event 3 | 20 | 14 | Has done event @@ -122,8 +122,8 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -157,8 +157,8 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event ----------+-----+--------+---------------- + user_id | sum | length | hasdone_event +--------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event 3 | 20 | 14 | Has done event @@ -196,14 +196,14 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) ------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, avg(array_length(events_table, 1)) AS event_average, @@ -268,8 +268,8 @@ GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay ----------+---------------------+----------- + user_id | event_average | count_pay +--------------------------------------------------------------------- 3 | 19.0000000000000000 | 7 2 | 12.0000000000000000 | 9 1 | 7.0000000000000000 | 5 @@ -341,8 +341,8 @@ HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay ----------+---------------------+----------- + user_id | event_average | count_pay +--------------------------------------------------------------------- 3 | 19.0000000000000000 | 3 2 | 12.0000000000000000 | 4 1 | 7.0000000000000000 | 3 @@ -406,8 +406,8 @@ GROUP BY count_pay, user_id ORDER BY event_average DESC, 
count_pay DESC, user_id DESC; - user_id | event_average | count_pay ----------+---------------------+----------- + user_id | event_average | count_pay +--------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 1 | 5.0000000000000000 | 2 @@ -467,18 +467,18 @@ HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay ----------+---------------------+----------- + user_id | event_average | count_pay +--------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 1 | 5.0000000000000000 | 2 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, user_lastseen, @@ -504,15 +504,15 @@ FROM ( GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) ------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) @@ -523,8 +523,8 @@ GROUP BY ORDER BY user_id DESC LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -532,17 +532,17 @@ ORDER BY 1 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_1 > 1 AND value_1 < 3 AND value_2 >= 1 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type < 3 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 6 | 4 6 | 4 2 | 4 @@ -550,25 +550,25 @@ LIMIT 5; 4 | 3 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_1 = 2 AND value_2 >= 1 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=2 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 1 DESC, 2 DESC LIMIT 3; - user_id | value_2 ----------+--------- + user_id | value_2 
+--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 2 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, sum(value_2) as cnt FROM users_table WHERE value_1 > 1 AND value_2 >= 1 @@ -578,8 +578,8 @@ GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 5; - user_id | cnt ----------+----- + user_id | cnt +--------------------------------------------------------------------- 4 | 43 2 | 37 3 | 34 @@ -587,26 +587,26 @@ LIMIT 5; 6 | 15 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_2 >= 1 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 4; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 5 | 4 (4 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, avg(value_2) FROM users_table @@ -626,15 +626,15 @@ GROUP BY ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | avg ----------+-------------------- + user_id | avg +--------------------------------------------------------------------- 4 | 2.0000000000000000 3 | 2.0000000000000000 (2 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who logged in more than once ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_1 from ( SELECT @@ -648,8 +648,8 @@ SELECT user_id, value_1 from ) AS a ORDER BY user_id ASC, value_1 ASC; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 2 | 0 2 | 2 2 | 3 @@ -669,8 +669,8 @@ SELECT user_id, value_1 from ) AS a ORDER BY user_id ASC, value_1 ASC; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 2 | 0 2 | 2 2 | 3 @@ -681,9 +681,9 @@ ORDER BY 3 | 4 (8 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM events_table WHERE @@ -697,8 +697,8 @@ WHERE value_1 = 1 AND value_2 > 2 ) ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 2 @@ 
-706,9 +706,9 @@ ORDER BY 1; 5 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, event_type FROM events_table WHERE @@ -717,16 +717,16 @@ GROUP BY user_id, event_type ORDER BY 2 DESC, 1 LIMIT 3; - user_id | event_type ----------+------------ + user_id | event_type +--------------------------------------------------------------------- 1 | 6 2 | 5 3 | 5 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all the users_table who has done some event more than three times ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM ( SELECT @@ -742,8 +742,8 @@ SELECT user_id FROM ) AS a ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -751,9 +751,9 @@ ORDER BY 6 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- +--------------------------------------------------------------------- CREATE TEMP TABLE assets AS SELECT users_table.user_id, users_table.value_1, prob @@ -772,8 +772,8 @@ FROM users_table.value_1 < 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM assets; - count | count | avg --------+-------+-------------------- + count | count | avg +--------------------------------------------------------------------- 732 | 6 | 3.3934426229508197 (1 row) @@ -795,10 +795,10 @@ SELECT count(*) FROM HAVING count(distinct value_1) = 2 ) as foo; -DEBUG: generating subplan 23_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) 4) -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table WHERE (((users_table.value_1 OPERATOR(pg_catalog.=) 1) OR (users_table.value_1 OPERATOR(pg_catalog.=) 3)) AND (NOT (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) GROUP BY users_table.user_id HAVING (count(DISTINCT users_table.value_1) OPERATOR(pg_catalog.=) 2)) foo - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) 4) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table WHERE (((users_table.value_1 OPERATOR(pg_catalog.=) 1) OR (users_table.value_1 OPERATOR(pg_catalog.=) 3)) AND (NOT (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) GROUP BY users_table.user_id HAVING (count(DISTINCT users_table.value_1) OPERATOR(pg_catalog.=) 2)) foo + count +--------------------------------------------------------------------- 1 (1 row) @@ -831,8 +831,8 @@ SELECT subquery_count FROM GROUP BY 
a.user_id ) AS inner_subquery; - subquery_count ----------------- + subquery_count +--------------------------------------------------------------------- 1 (1 row) @@ -864,8 +864,8 @@ WHERE b.user_id IS NULL GROUP BY a.user_id; - subquery_count ----------------- + subquery_count +--------------------------------------------------------------------- 1 (1 row) @@ -940,8 +940,8 @@ WHERE GROUP BY e1.user_id LIMIT 1; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen +--------------------------------------------------------------------- 1 | 1 | 1 | 1 | 1 | 1 (1 row) @@ -1005,8 +1005,8 @@ FROM ( GROUP BY e1.user_id ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen +--------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 4 | 252 | 252 | 252 | 252 | 252 @@ -1076,8 +1076,8 @@ group by e1.user_id HAVING sum(submit_card_info) > 0 ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen +--------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 4 | 252 | 252 | 252 | 252 | 252 @@ -1116,8 +1116,8 @@ GROUP BY ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg ----------+-------------------- + user_id | subquery_avg +--------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 4 | 2.6666666666666667 @@ -1153,8 +1153,8 @@ HAVING ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg ----------+-------------------- + user_id | subquery_avg +--------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 4 | 2.6666666666666667 @@ -1192,8 +1192,8 @@ GROUP BY ORDER BY avg(b.value_3) DESC, 2, 1 LIMIT 5; - user_id | subquery_avg | avg ----------+--------------------+------------------ + user_id | subquery_avg | avg +--------------------------------------------------------------------- 1 | 2.3333333333333333 | 3.33333333333333 4 | 2.6666666666666667 | 2.55555555555556 5 | 2.1666666666666667 | 2.16666666666667 @@ -1226,8 +1226,8 @@ GROUP BY ORDER BY 4 DESC, 1 DESC, 2 ASC, 3 ASC LIMIT 10; - user_id | value_2 | value_3 | counts ----------+---------+---------+-------- + user_id | value_2 | value_3 | counts +--------------------------------------------------------------------- 5 | 3 | 4 | 160 2 | 3 | 5 | 156 3 | 2 | 5 | 108 @@ -1253,8 +1253,8 @@ GROUP BY ORDER BY users_count desc, avg_type DESC LIMIT 5; - avg_type | users_count ---------------------+------------- + 
avg_type | users_count +--------------------------------------------------------------------- 2.3750000000000000 | 24 2.5714285714285714 | 21 2.5294117647058824 | 17 @@ -1282,8 +1282,8 @@ FROM events_table ORDER BY users_count.ct desc, event_type DESC LIMIT 5; - event_type | ct -------------+---- + event_type | ct +--------------------------------------------------------------------- 5 | 26 4 | 26 3 | 26 @@ -1314,8 +1314,8 @@ FROM ORDER BY total_count DESC, count_1 DESC, 1 DESC LIMIT 10; - user_id | count_1 | total_count ----------+---------+------------- + user_id | count_1 | total_count +--------------------------------------------------------------------- 2 | 18 | 7 3 | 17 | 7 2 | 18 | 6 @@ -1356,8 +1356,8 @@ WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg ----------+------------------------ + user_id | subquery_avg +--------------------------------------------------------------------- 5 | 0.00000000000000000000 3 | 2.0000000000000000 4 | 1.00000000000000000000 @@ -1424,8 +1424,8 @@ GROUP BY ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg ----------+-------------------- + user_id | subquery_avg +--------------------------------------------------------------------- 3 | 3.3333333333333333 5 | 2.2000000000000000 4 | 3.2500000000000000 @@ -1462,8 +1462,8 @@ FROM ORDER BY prob DESC, value_2 DESC, user_id DESC, event_type DESC LIMIT 10; - user_id | event_type ----------+------------ + user_id | event_type +--------------------------------------------------------------------- 3 | 5 3 | 4 3 | 4 @@ -1500,8 +1500,8 @@ FROM ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; - user_id | event_type ----------+------------ + user_id | event_type +--------------------------------------------------------------------- 3 | 5 2 | 5 2 | 5 @@ -1546,8 +1546,8 @@ FROM ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; - user_id | event_type ----------+------------ + user_id | event_type +--------------------------------------------------------------------- 3 | 5 2 | 5 2 | 5 @@ -1571,8 +1571,8 @@ SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(AN LIMIT 1; $$ LANGUAGE sql') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+----------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) @@ -1645,8 +1645,8 @@ AS outer_outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; - user_id_e | event_type_e | value_2 | value_3 | user_id ------------+--------------+---------+---------+--------- + user_id_e | event_type_e | value_2 | value_3 | user_id +--------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1728,8 +1728,8 @@ FROM ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; - user_id_e | event_type_e | value_2 | value_3 | user_id ------------+--------------+---------+---------+--------- + user_id_e | event_type_e | value_2 | value_3 | user_id +--------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1745,8 +1745,8 @@ LIMIT 10; -- drop created functions SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, 
ANYELEMENT)') ORDER BY 1,2; - nodename | nodeport | success | result ------------+----------+---------+--------------- + nodename | nodeport | success | result +--------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) @@ -1771,8 +1771,8 @@ FROM ( ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; - subquery_count ----------------- + subquery_count +--------------------------------------------------------------------- 1 1 1 @@ -1843,8 +1843,8 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT 1+1 OFFSET 1::smallint; DEBUG: push down of limit count: 3 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 4 | 184 2 | 180 (2 rows) @@ -1867,8 +1867,8 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT '3' OFFSET 2+1; DEBUG: push down of limit count: 6 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 5 | 156 6 | 40 1 | 28 @@ -1897,8 +1897,8 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT volatile_func_test() + (ROW(1,2,NULL) < ROW(1,3,0))::int OFFSET volatile_func_test() + volatile_func_test(); DEBUG: push down of limit count: 4 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 3 | 340 5 | 312 (2 rows) @@ -1926,8 +1926,8 @@ LIMIT (5 > 4)::int OFFSET ELSE 2 END; DEBUG: push down of limit count: 3 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 2 | 180 (1 row) @@ -1949,8 +1949,8 @@ FROM ( LIMIT $1 OFFSET $2; EXECUTE parametrized_limit(1,1); DEBUG: push down of limit count: 2 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 4 | 184 (1 row) @@ -1971,8 +1971,8 @@ FROM ( LIMIT 1 OFFSET $1; EXECUTE parametrized_offset(1); DEBUG: push down of limit count: 2 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 4 | 184 (1 row) @@ -1992,8 +1992,8 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool RETURNS NULL ON NULL INPUT; $f$); - run_command_on_workers ---------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE FUNCTION") (localhost,57638,t,"CREATE FUNCTION") (2 rows) @@ -2030,8 +2030,8 @@ FROM ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 3 AND test_join_function_2(users_table.user_id, temp.user_id); - user_id | value_1 | prob ----------+---------+------ + user_id | value_1 | prob +--------------------------------------------------------------------- (0 rows) -- we do support the following since there is already an equality on the partition @@ -2054,8 +2054,8 @@ FROM users_table.value_1 < 3 ORDER BY 2 DESC, 1 DESC LIMIT 10; - user_id | value_1 | prob ----------+---------+------------------------ + user_id | value_1 | prob +--------------------------------------------------------------------- 6 | 2 | 0.50000000000000000000 6 | 2 | 0.50000000000000000000 6 | 2 | 0.50000000000000000000 @@ -2082,8 +2082,8 @@ FROM events_table.time > users_table.time AND events_table.value_2 IN (0, 4) ) as foo; - count -------- + count 
+--------------------------------------------------------------------- 180 (1 row) @@ -2123,8 +2123,8 @@ FROM ) as bar WHERE foo.event_type > bar.event_type AND foo.user_id = bar.user_id; - count -------- + count +--------------------------------------------------------------------- 11971 (1 row) @@ -2168,8 +2168,8 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -2194,8 +2194,8 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1, 2 LIMIT 5; - user_id | value_1 | prob ----------+---------+------------------------ + user_id | value_1 | prob +--------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 3 | 0 | 0.50000000000000000000 @@ -2219,8 +2219,8 @@ FROM ON users_ids.user_id = temp.user_id ORDER BY 1,2 LIMIT 5; - user_id | value_1 | prob ----------+---------+------------------------ + user_id | value_1 | prob +--------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 3 | 0 | 0.50000000000000000000 @@ -2244,9 +2244,9 @@ FROM GROUP BY 1 ) AS temp; - count | avg --------+----- - 6 | + count | avg +--------------------------------------------------------------------- + 6 | (1 row) -- Test the case when a subquery has a lateral reference to two levels upper @@ -2275,8 +2275,8 @@ LATERAL ( ) b ORDER BY user_id, value_2, cnt LIMIT 1; - user_id | value_2 | cnt ----------+---------+----- + user_id | value_2 | cnt +--------------------------------------------------------------------- 2 | 0 | 1 (1 row) @@ -2286,8 +2286,8 @@ SELECT run_command_on_workers($f$ DROP FUNCTION test_join_function_2(integer, integer); $f$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) diff --git a/src/test/regress/expected/multi_subquery_complex_queries.out b/src/test/regress/expected/multi_subquery_complex_queries.out index ce61f1df1..b9b329570 100644 --- a/src/test/regress/expected/multi_subquery_complex_queries.out +++ b/src/test/regress/expected/multi_subquery_complex_queries.out @@ -68,8 +68,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -133,8 +133,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -199,8 +199,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 234 2 | 75 @@ -264,8 +264,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 369 2 | 75 @@ -343,8 +343,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 2 | 433 3 | 75 @@ -423,14 +423,14 @@ GROUP BY types ORDER BY types; -DEBUG: generating subplan 16_1 for subquery SELECT max(events."time") 
AS max, 0 AS event, events.user_id FROM public.events_table events, public.users_table users WHERE ((events.user_id OPERATOR(pg_catalog.=) users.value_2) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) GROUP BY events.user_id -DEBUG: generating subplan 16_2 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 0 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 -DEBUG: generating subplan 16_3 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 2 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_3 -DEBUG: generating subplan 16_4 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 3 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_4 -DEBUG: generating subplan 16_5 for subquery SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT events_subquery_2.max, events_subquery_2.event, events_subquery_2.user_id FROM (SELECT events_subquery_5.max, events_subquery_5.event, events_subquery_5.user_id FROM (SELECT intermediate_result.max, intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(max timestamp without time zone, event integer, user_id integer)) events_subquery_5) events_subquery_2 UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_4'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT t_1.user_id, t_1."time", unnest(t_1.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_5'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer)) t1 GROUP BY t1.user_id) t_1) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.=) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types - types | sumofeventtype --------+---------------- +DEBUG: generating subplan XXX_1 for subquery SELECT max(events."time") AS max, 0 AS event, events.user_id FROM public.events_table events, public.users_table users WHERE ((events.user_id OPERATOR(pg_catalog.=) users.value_2) AND 
(events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) GROUP BY events.user_id +DEBUG: generating subplan XXX_2 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 0 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 +DEBUG: generating subplan XXX_3 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 2 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_3 +DEBUG: generating subplan XXX_4 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 3 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_4 +DEBUG: generating subplan XXX_5 for subquery SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT events_subquery_2.max, events_subquery_2.event, events_subquery_2.user_id FROM (SELECT events_subquery_5.max, events_subquery_5.event, events_subquery_5.user_id FROM (SELECT intermediate_result.max, intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max timestamp without time zone, event integer, user_id integer)) events_subquery_5) events_subquery_2 UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT t_1.user_id, t_1."time", unnest(t_1.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer)) t1 GROUP BY t1.user_id) t_1) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.=) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 2 | 433 3 | 75 @@ -500,10 +500,10 @@ GROUP BY types ORDER BY types; -DEBUG: generating subplan 22_1 for subquery SELECT user_id, "time", unnest(collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT 
events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) events_subquery_4) t1 GROUP BY t1.user_id) t -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_types FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_types integer)) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.<>) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types - types | sumofeventtype --------+---------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", unnest(collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) events_subquery_4) t1 GROUP BY t1.user_id) t +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_types FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_types integer)) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.<>) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types + types | sumofeventtype +--------------------------------------------------------------------- 0 | 2088 1 | 2163 2 | 397 @@ -652,8 +652,8 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 434 2 | 433 @@ -716,8 +716,8 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -779,8 +779,8 @@ INNER JOIN ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -838,8 +838,8 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 62 @@ -902,8 +902,8 @@ INNER JOIN ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -982,8 +982,8 @@ INNER JOIN GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; - user_id | cnt ----------+----- + user_id | cnt +--------------------------------------------------------------------- 3 | 275 6 | 72 (2 rows) @@ -1062,10 +1062,10 @@ INNER JOIN GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; -DEBUG: generating subplan 42_1 for subquery SELECT DISTINCT user_id FROM public.events_table events WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[0, 6])) GROUP BY user_id -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT user_id, count(*) AS cnt FROM (SELECT first_query.user_id, random() AS random FROM ((SELECT t.user_id, t."time", unnest(t.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION ALL SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION ALL SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT 
events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION ALL SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6]))) events_subquery_4) t1 GROUP BY t1.user_id) t) first_query JOIN (SELECT t.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t LEFT JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t2 ON ((t2.user_id OPERATOR(pg_catalog.>) t.user_id))) WHERE (t2.user_id IS NULL)) second_query ON ((first_query.user_id OPERATOR(pg_catalog.=) second_query.user_id)))) final_query GROUP BY user_id ORDER BY (count(*)) DESC, user_id DESC LIMIT 10 - user_id | cnt ----------+----- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT user_id FROM public.events_table events WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[0, 6])) GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, count(*) AS cnt FROM (SELECT first_query.user_id, random() AS random FROM ((SELECT t.user_id, t."time", unnest(t.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION ALL SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION ALL SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION ALL SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6]))) events_subquery_4) t1 GROUP BY t1.user_id) t) first_query JOIN (SELECT t.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t LEFT JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t2 ON ((t2.user_id OPERATOR(pg_catalog.>) t.user_id))) WHERE (t2.user_id IS NULL)) second_query ON ((first_query.user_id OPERATOR(pg_catalog.=) second_query.user_id)))) final_query GROUP BY user_id ORDER BY (count(*)) DESC, user_id DESC LIMIT 10 + user_id | cnt +--------------------------------------------------------------------- 5 | 324 6 | 72 (2 rows) @@ -1145,8 +1145,8 @@ INNER JOIN GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; - user_id | cnt ----------+----- + user_id | cnt 
+--------------------------------------------------------------------- 3 | 275 6 | 72 (2 rows) @@ -1191,8 +1191,8 @@ FROM order BY user_id LIMIT 50; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 (2 rows) @@ -1230,8 +1230,8 @@ FROM ORDER BY user_id limit 50; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 (2 rows) @@ -1371,8 +1371,8 @@ FROM ORDER BY user_id DESC, lastseen DESC LIMIT 10; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 2 | Thu Nov 23 17:26:14.563216 2017 2 | Thu Nov 23 17:26:14.563216 2017 @@ -1435,8 +1435,8 @@ SELECT "some_users_data".user_id, MAX(lastseen), count(*) GROUP BY 1 ORDER BY 2, 1 DESC LIMIT 10; - user_id | max | count ----------+---------------------------------+------- + user_id | max | count +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 | 10 (1 row) @@ -1497,7 +1497,7 @@ ORDER BY LIMIT 10; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 53_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries @@ -1558,7 +1558,7 @@ FROM ORDER BY user_id DESC, lastseen DESC LIMIT 10; -DEBUG: generating subplan 56_1 for subquery SELECT user_id, value_1 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_1 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) ERROR: cannot push down this subquery DETAIL: Limit in subquery is currently unsupported when a subquery references a column from another query SET citus.enable_repartition_joins to OFF; @@ -1716,8 +1716,8 @@ GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; - value | generated_group_field --------+----------------------- + value | generated_group_field +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1768,10 +1768,10 @@ GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; -DEBUG: generating subplan 64_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) 
-DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.=) user_where_1_join_1.value_2)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC - value | generated_group_field --------+----------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.=) user_where_1_join_1.value_2)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC + value | generated_group_field +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1820,10 +1820,10 @@ GROUP BY 
"generated_group_field" ORDER BY generated_group_field DESC, value DESC; -DEBUG: generating subplan 66_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) -DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.>=) user_where_1_join_1.user_id)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC - value | generated_group_field --------+----------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.>=) user_where_1_join_1.user_id)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) 
user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC + value | generated_group_field +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1869,8 +1869,8 @@ FROM ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; - value_3 | cnt ----------+----- + value_3 | cnt +--------------------------------------------------------------------- 0 | 7 10 | 21 4 | 21 @@ -1920,10 +1920,10 @@ FROM ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; -DEBUG: generating subplan 69_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) -DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT value_3, count(*) AS cnt FROM (SELECT segmentalias_1.value_3, segmentalias_1.user_id, random() AS random FROM (SELECT users_in_segment_1.user_id, users_in_segment_1.value_3 FROM ((SELECT all_buckets_1.user_id, (all_buckets_1.value_3 OPERATOR(pg_catalog.*) (2)::double precision) AS value_3 FROM (SELECT simple_user_where_1.user_id, simple_user_where_1.value_3 FROM (SELECT users.user_id, users.value_3 FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 2))) simple_user_where_1) all_buckets_1) users_in_segment_1 JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) some_users_data ON (true))) segmentalias_1) "tempQuery" GROUP BY value_3 ORDER BY (count(*)), value_3 DESC LIMIT 10 - value_3 | cnt ----------+----- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_3, count(*) AS cnt FROM (SELECT segmentalias_1.value_3, segmentalias_1.user_id, random() AS random FROM (SELECT users_in_segment_1.user_id, users_in_segment_1.value_3 FROM ((SELECT all_buckets_1.user_id, (all_buckets_1.value_3 OPERATOR(pg_catalog.*) (2)::double precision) AS value_3 FROM (SELECT simple_user_where_1.user_id, simple_user_where_1.value_3 FROM (SELECT users.user_id, users.value_3 FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 2))) simple_user_where_1) all_buckets_1) users_in_segment_1 JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) some_users_data ON (true))) segmentalias_1) "tempQuery" GROUP BY value_3 ORDER BY (count(*)), value_3 DESC LIMIT 10 + value_3 | cnt +--------------------------------------------------------------------- 0 | 14 10 | 42 4 | 42 @@ -1977,8 +1977,8 @@ LIMIT 10) "some_users" ORDER BY value_3 DESC, user_id ASC LIMIT 10; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 2 | 5 2 | 5 2 | 5 @@ -2031,8 +2031,8 @@ FROM ORDER BY value_3 DESC, user_id ASC LIMIT 10; - user_id | value_3 ----------+--------- + user_id | value_3 
+--------------------------------------------------------------------- 2 | 5 2 | 5 2 | 5 @@ -2086,8 +2086,8 @@ FROM ORDER BY value_3 DESC, user_id DESC LIMIT 10; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -2139,8 +2139,8 @@ FROM ) "some_users_data" ON TRUE ORDER BY value_3 DESC, user_id DESC LIMIT 10; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -2190,8 +2190,8 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field ------+----------------------- + cnt | generated_group_field +--------------------------------------------------------------------- 336 | 2 210 | 1 210 | 3 @@ -2218,8 +2218,8 @@ GROUP BY ORDER BY cnt DESC, user_id DESC LIMIT 10; - cnt | user_id ------+--------- + cnt | user_id +--------------------------------------------------------------------- 11 | 3 10 | 2 8 | 4 @@ -2267,8 +2267,8 @@ FROM ORDER BY value_2 DESC, user_id DESC LIMIT 10; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 2 | 5 (1 row) @@ -2377,8 +2377,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 (2 rows) @@ -2444,8 +2444,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 425 2 | 75 @@ -2510,8 +2510,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -2571,8 +2571,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 4 1 | 8 2 | 1 @@ -2608,8 +2608,8 @@ FROM ( ORDER BY 1,2,3,4 LIMIT 5; - uid | event_type | value_2 | value_3 ------+------------+---------+--------- + uid | event_type | value_2 | value_3 +--------------------------------------------------------------------- 1 | 1 | 0 | 2 1 | 1 | 0 | 2 1 | 1 | 0 | 2 @@ -2645,12 +2645,12 @@ FROM USING (user_id) GROUP BY user_id ORDER BY 1, 2; - user_id | subquery_avg ----------+------------------------ + user_id | subquery_avg +--------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 - 5 | + 5 | (4 rows) -- see the comment for the above query @@ -2680,12 +2680,12 @@ FROM USING (user_id) GROUP BY a.user_id ORDER BY 1, 2; - user_id | subquery_avg ----------+------------------------ + user_id | subquery_avg +--------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 - 5 | + 5 | (4 rows) -- queries where column aliases are used @@ -2697,8 +2697,8 @@ FROM ( FROM (users_table JOIN events_table USING (user_id)) k (k1, k2, k3)) l ORDER BY k1 LIMIT 5; - k1 ----- + k1 +--------------------------------------------------------------------- 1 1 1 @@ -2712,8 +2712,8 @@ FROM ( FROM (users_table JOIN events_table USING (user_id)) k (k1, k2, k3)) l ORDER BY k1 LIMIT 5; - k1 
----- + k1 +--------------------------------------------------------------------- 1 2 3 @@ -2725,8 +2725,8 @@ SELECT x1, x3, value_2 FROM (users_table u FULL JOIN events_table e ON (u.user_id = e.user_id)) k(x1, x2, x3, x4, x5) ORDER BY 1, 2, 3 LIMIT 5; - x1 | x3 | value_2 -----+----+--------- + x1 | x3 | value_2 +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 1 | 1 | 1 @@ -2738,8 +2738,8 @@ SELECT x1, x3, value_2 FROM (users_table u FULL JOIN events_table e USING (user_id)) k(x1, x2, x3, x4, x5) ORDER BY 1, 2, 3 LIMIT 5; - x1 | x3 | value_2 -----+----+--------- + x1 | x3 | value_2 +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 1 | 1 | 1 @@ -2752,8 +2752,8 @@ FROM (users_table LEFT OUTER JOIN events_table ON (users_table.user_id = events INNER JOIN users_table as u2 ON (test.c_custkey = u2.user_id) ORDER BY 1 DESC LIMIT 10; - c_custkey ------------ + c_custkey +--------------------------------------------------------------------- 6 6 6 @@ -2772,8 +2772,8 @@ FROM (users_table LEFT OUTER JOIN events_table ON (users_table.user_id = events GROUP BY 1 ORDER BY 2, 1 LIMIT 10; - c_custkey | date_trunc ------------+-------------------------- + c_custkey | date_trunc +--------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 6 | Thu Nov 23 14:43:00 2017 4 | Thu Nov 23 15:32:00 2017 @@ -2789,8 +2789,8 @@ GROUP BY 1 HAVING extract(minute from max(c_nationkey)) >= 45 ORDER BY 2, 1 LIMIT 10; - c_custkey | date_trunc ------------+-------------------------- + c_custkey | date_trunc +--------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 5 | Thu Nov 23 16:48:00 2017 (2 rows) @@ -2800,8 +2800,8 @@ FROM (users_table JOIN events_table USING (user_id)) AS test(user_id, c_nationke FULL JOIN users_table AS u2 USING (user_id) ORDER BY 1 DESC LIMIT 10; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -2824,8 +2824,8 @@ FROM ((users_table GROUP BY 1,2 ORDER BY 2 DESC, 1 DESC LIMIT 10; - bar | value_3 ------+--------- + bar | value_3 +--------------------------------------------------------------------- 3 | 5 2 | 5 1 | 5 @@ -2852,8 +2852,8 @@ JOIN LATERAL GROUP BY 1, 2 ORDER BY 2 DESC, 1 DESC LIMIT 10; - bar | value_3 ------+--------- + bar | value_3 +--------------------------------------------------------------------- 3 | 5 2 | 5 1 | 5 @@ -2881,8 +2881,8 @@ SELECT bar, foo.value_3, c_custkey, test_2.time_2 FROM ON (users_table.user_id = deeper_join_2.user_id_deep)) AS test_2(c_custkey, time_2) WHERE foo.bar = test_2.c_custkey ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC LIMIT 10; - bar | value_3 | c_custkey | time_2 ------+---------+-----------+--------------------------------- + bar | value_3 | c_custkey | time_2 +--------------------------------------------------------------------- 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 diff --git a/src/test/regress/expected/multi_subquery_complex_reference_clause.out b/src/test/regress/expected/multi_subquery_complex_reference_clause.out index e1977446d..1427e6aea 100644 --- a/src/test/regress/expected/multi_subquery_complex_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_complex_reference_clause.out @@ -7,9 +7,9 @@ -- SET citus.next_shard_id TO 1400000; CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int); SELECT 
create_distributed_table('user_buy_test_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO user_buy_test_table VALUES(1,2,1); @@ -18,9 +18,9 @@ INSERT INTO user_buy_test_table VALUES(3,4,2); INSERT INTO user_buy_test_table VALUES(7,5,2); CREATE TABLE users_return_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('users_return_test_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO users_return_test_table VALUES(4,1,1); @@ -30,8 +30,8 @@ INSERT INTO users_return_test_table VALUES(3,2,2); SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -39,8 +39,8 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random(), k_no FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1 WHERE k_no = 47; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -49,8 +49,8 @@ SELECT subquery_1.item_id FROM (SELECT user_buy_test_table.item_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1 ORDER BY 1; - item_id ---------- + item_id +--------------------------------------------------------------------- 2 3 4 @@ -62,8 +62,8 @@ SELECT subquery_1.user_id FROM (SELECT user_buy_test_table.user_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_1 ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -86,8 +86,8 @@ DETAIL: There exist a reference table in the outer part of the outer join SELECT count(*) FROM (SELECT random() FROM users_ref_test_table RIGHT JOIN user_buy_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -101,8 +101,8 @@ DETAIL: There exist a reference table in the outer part of the outer join SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -110,8 +110,8 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -119,8 +119,8 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) @@ -131,8 +131,8 @@ SELECT count(*) FROM LEFT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 LEFT 
JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -144,8 +144,8 @@ SELECT count(*) FROM (SELECT DISTINCT user_buy_test_table.user_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id AND users_ref_test_table.k_no > 44 AND user_buy_test_table.user_id > 44) subquery_2 WHERE subquery_1.user_id = subquery_2.user_id ; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -165,8 +165,8 @@ SELECT subquery_2.id ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id ORDER BY 1 DESC LIMIT 5; - id ----- + id +--------------------------------------------------------------------- 3 2 1 @@ -203,8 +203,8 @@ FROM INNER JOIN events_reference_table ON (events_reference_table.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum ----------+------- + user_id | sum +--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -223,8 +223,8 @@ FROM INNER JOIN (SELECT *, random() FROM events_reference_table) as ref_all ON (ref_all.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum ----------+------- + user_id | sum +--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -236,16 +236,16 @@ FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) -- table function cannot be used without subquery pushdown SELECT count(*) FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id = users_ref_test_table.id; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -253,15 +253,15 @@ SELECT count(*) FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT * FROM generate_series(1,10) id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -281,11 +281,11 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN random() AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 30_1 for subquery SELECT id FROM random() users_ref_test_table(id) -DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT random() AS random FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(id double precision)) 
users_ref_test_table(id) ON (((user_buy_test_table.item_id)::double precision OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 +DEBUG: generating subplan XXX_1 for subquery SELECT id FROM random() users_ref_test_table(id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT random() AS random FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id double precision)) users_ref_test_table(id) ON (((user_buy_test_table.item_id)::double precision OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -295,11 +295,11 @@ SELECT count(*) FROM ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1 WHERE item_id = 6; DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 31_1 for subquery SELECT id FROM generate_series((random())::integer, 10) users_ref_test_table(id) -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.item_id FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) users_ref_test_table(id) ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 WHERE (item_id OPERATOR(pg_catalog.=) 6) +DEBUG: generating subplan XXX_1 for subquery SELECT id FROM generate_series((random())::integer, 10) users_ref_test_table(id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.item_id FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) users_ref_test_table(id) ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 WHERE (item_id OPERATOR(pg_catalog.=) 6) DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -310,15 +310,15 @@ SELECT count(*) FROM SELECT id FROM generate_series(1,10) AS users_ref_test_table(id)) subquery_1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 32_1 for subquery SELECT user_id FROM public.user_buy_test_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.user_buy_test_table DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 32_2 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT users_ref_test_table.id FROM generate_series(1, 10) users_ref_test_table(id) -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT users_ref_test_table.id FROM generate_series(1, 10) users_ref_test_table(id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 14 (1 row) @@ -327,24 +327,24 @@ RESET client_min_messages; SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN (SELECT 4 AS id) users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- subquery without FROM triggers subquery pushdown SELECT count(*) FROM user_buy_test_table JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- subquery without FROM can be the inner relationship in an outer join SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) @@ -362,15 +362,15 @@ SELECT count(*) FROM SELECT id FROM (SELECT 5 AS id) users_ref_test_table) subquery_1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 39_1 for subquery SELECT user_id FROM public.user_buy_test_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.user_buy_test_table DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 39_2 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT users_ref_test_table.id FROM (SELECT 5 AS id) users_ref_test_table -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT users_ref_test_table.id FROM (SELECT 5 AS id) users_ref_test_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -383,15 +383,15 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 42_1 for subquery SELECT user_id FROM public.user_buy_test_table +DEBUG: generating subplan XXX_1 for subquery SELECT 
user_id FROM public.user_buy_test_table DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 42_2 for subquery SELECT users_ref_test_table.id FROM public.users_ref_test_table UNION SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) sub ORDER BY id DESC +DEBUG: generating subplan XXX_2 for subquery SELECT users_ref_test_table.id FROM public.users_ref_test_table UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) sub ORDER BY id DESC DEBUG: Creating router plan DEBUG: Plan is router executable - id ----- + id +--------------------------------------------------------------------- 7 6 5 @@ -409,15 +409,15 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 45_1 for subquery SELECT user_id, (random() OPERATOR(pg_catalog.*) (0)::double precision) FROM (SELECT user_buy_test_table.user_id FROM public.user_buy_test_table) sub2 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, (random() OPERATOR(pg_catalog.*) (0)::double precision) FROM (SELECT user_buy_test_table.user_id FROM public.user_buy_test_table) sub2 DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 45_2 for subquery SELECT sub1.id, (random() OPERATOR(pg_catalog.*) (0)::double precision) FROM (SELECT users_ref_test_table.id FROM public.users_ref_test_table) sub1 UNION SELECT intermediate_result.user_id, intermediate_result."?column?" FROM read_intermediate_result('45_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "?column?" double precision) -DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT id, "?column?" FROM (SELECT intermediate_result.id, intermediate_result."?column?" FROM read_intermediate_result('45_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer, "?column?" double precision)) sub ORDER BY id DESC +DEBUG: generating subplan XXX_2 for subquery SELECT sub1.id, (random() OPERATOR(pg_catalog.*) (0)::double precision) FROM (SELECT users_ref_test_table.id FROM public.users_ref_test_table) sub1 UNION SELECT intermediate_result.user_id, intermediate_result."?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "?column?" double precision) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, "?column?" FROM (SELECT intermediate_result.id, intermediate_result."?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer, "?column?" double precision)) sub ORDER BY id DESC DEBUG: Creating router plan DEBUG: Plan is router executable - id | ?column? -----+---------- + id | ?column? 
+--------------------------------------------------------------------- 7 | 0 6 | 0 5 | 0 @@ -436,8 +436,8 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id ---------- + user_id +--------------------------------------------------------------------- 7 3 2 @@ -452,8 +452,8 @@ SELECT * FROM SELECT user_id, random() * 0 FROM (SELECT user_id FROM user_buy_test_table) sub2) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id | ?column? ----------+---------- + user_id | ?column? +--------------------------------------------------------------------- 7 | 0 3 | 0 2 | 0 @@ -476,8 +476,8 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table WHERE user_id in (select id from users_ref_test_table)) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id ---------- + user_id +--------------------------------------------------------------------- 3 2 1 @@ -493,8 +493,8 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id ---------- + user_id +--------------------------------------------------------------------- 7 3 2 @@ -526,8 +526,8 @@ FROM LEFT JOIN events_reference_table ON (events_reference_table.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum ----------+------- + user_id | sum +--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -594,8 +594,8 @@ SELECT * FROM user_id > 2 and value_2 = 1) as foo_in ON (event_val_2 = user_id)) as foo LEFT JOIN (SELECT user_id as user_user_id FROM users_table) as fooo ON (user_id = user_user_id)) as bar ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 3 4 5 @@ -631,8 +631,8 @@ FROM FROM events_reference_table INNER JOIN users_table ON (users_table.user_id = events_reference_table.user_id) GROUP BY users_table.user_id) AS events_all LEFT JOIN events_table ON (events_all.usr_id = events_table.user_id) GROUP BY 2 ORDER BY 1 DESC, 2 DESC LIMIT 5; - max | usr_id ------+-------- + max | usr_id +--------------------------------------------------------------------- 432 | 2 391 | 4 364 | 5 @@ -715,8 +715,8 @@ FROM ORDER BY user_id DESC LIMIT 10; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 1 | Thu Nov 23 21:54:46.924477 2017 1 | Thu Nov 23 21:54:46.924477 2017 1 | Thu Nov 23 21:54:46.924477 2017 @@ -772,8 +772,8 @@ GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; - value | generated_group_field --------+----------------------- + value | generated_group_field +--------------------------------------------------------------------- 2 | 5 1 | 3 3 | 2 @@ -818,8 +818,8 @@ FROM ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; - value_3 | cnt ----------+----- + value_3 | cnt +--------------------------------------------------------------------- 0 | 7 10 | 21 4 | 21 @@ -871,8 +871,8 @@ LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -920,8 +920,8 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, 
generated_group_field ASC LIMIT 10; - cnt | generated_group_field ------+----------------------- + cnt | generated_group_field +--------------------------------------------------------------------- 336 | 2 210 | 1 210 | 3 @@ -999,8 +999,8 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event ----------+-----+--------+---------------- + user_id | sum | length | hasdone_event +--------------------------------------------------------------------- 2 | 72 | 14 | Has done event 3 | 238 | 14 | Has done event | 1 | 14 | Has done event @@ -1034,8 +1034,8 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event ----------+-----+--------+---------------- + user_id | sum | length | hasdone_event +--------------------------------------------------------------------- 1 | 55 | 14 | Has done event 2 | 88 | 14 | Has done event 3 | 83 | 14 | Has done event @@ -1080,8 +1080,8 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field ------+----------------------- + cnt | generated_group_field +--------------------------------------------------------------------- 737 | 5 679 | 1 591 | 2 @@ -1184,8 +1184,8 @@ INNER JOIN ORDER BY types LIMIT 5; - types -------- + types +--------------------------------------------------------------------- 0 0 0 @@ -1264,8 +1264,8 @@ GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 217 2 | 191 3 | 31 @@ -1324,8 +1324,8 @@ INNER JOIN WHERE value_1 > 2 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 191 1 | 191 2 | 31 @@ -1347,10 +1347,10 @@ SELECT count(*) FROM (SELECT user_buy_test_table.user_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_2 WHERE subquery_1.user_id != subquery_2.user_id ; -DEBUG: generating subplan 84_1 for subquery SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.user_id OPERATOR(pg_catalog.>) users_ref_test_table.id))) -DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) subquery_2 WHERE (subquery_1.user_id OPERATOR(pg_catalog.<>) subquery_2.user_id) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.user_id OPERATOR(pg_catalog.>) users_ref_test_table.id))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.user_id, random() AS random 
FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) subquery_2 WHERE (subquery_1.user_id OPERATOR(pg_catalog.<>) subquery_2.user_id) + count +--------------------------------------------------------------------- 67 (1 row) @@ -1393,8 +1393,8 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; -DEBUG: generating subplan 86_1 for subquery SELECT user_id, value_2 AS generated_group_field FROM public.users_table users -DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS cnt, generated_group_field FROM (SELECT "eventQuery".user_id, random() AS random, "eventQuery".generated_group_field FROM (SELECT multi_group_wrapper_1."time", multi_group_wrapper_1.event_user_id, multi_group_wrapper_1.user_id, left_group_by_1.generated_group_field, random() AS random FROM ((SELECT temp_data_queries."time", temp_data_queries.event_user_id, user_filters_1.user_id FROM ((SELECT events."time", events.user_id AS event_user_id FROM public.events_table events WHERE (events.user_id OPERATOR(pg_catalog.>) 2)) temp_data_queries JOIN (SELECT users.user_id FROM public.users_reference_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 2) AND (users.value_2 OPERATOR(pg_catalog.=) 5))) user_filters_1 ON ((temp_data_queries.event_user_id OPERATOR(pg_catalog.<) user_filters_1.user_id)))) multi_group_wrapper_1 RIGHT JOIN (SELECT intermediate_result.user_id, intermediate_result.generated_group_field FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, generated_group_field integer)) left_group_by_1 ON ((left_group_by_1.user_id OPERATOR(pg_catalog.>) multi_group_wrapper_1.event_user_id)))) "eventQuery") "pushedDownQuery" GROUP BY generated_group_field ORDER BY (count(*)) DESC, generated_group_field LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 AS generated_group_field FROM public.users_table users +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS cnt, generated_group_field FROM (SELECT "eventQuery".user_id, random() AS random, "eventQuery".generated_group_field FROM (SELECT multi_group_wrapper_1."time", multi_group_wrapper_1.event_user_id, multi_group_wrapper_1.user_id, left_group_by_1.generated_group_field, random() AS random FROM ((SELECT temp_data_queries."time", temp_data_queries.event_user_id, user_filters_1.user_id FROM ((SELECT events."time", events.user_id AS event_user_id FROM public.events_table events WHERE (events.user_id OPERATOR(pg_catalog.>) 2)) temp_data_queries JOIN (SELECT users.user_id FROM public.users_reference_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 2) AND (users.value_2 OPERATOR(pg_catalog.=) 5))) user_filters_1 ON ((temp_data_queries.event_user_id OPERATOR(pg_catalog.<) user_filters_1.user_id)))) multi_group_wrapper_1 RIGHT JOIN (SELECT intermediate_result.user_id, intermediate_result.generated_group_field FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, generated_group_field integer)) left_group_by_1 ON ((left_group_by_1.user_id OPERATOR(pg_catalog.>) multi_group_wrapper_1.event_user_id)))) "eventQuery") "pushedDownQuery" 
GROUP BY generated_group_field ORDER BY (count(*)) DESC, generated_group_field LIMIT 10 ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join RESET client_min_messages; @@ -1426,8 +1426,8 @@ SELECT foo.user_id FROM SELECT m.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) WHERE event_type > 100 ) as foo; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) -- not pushdownable since group by is on the reference table column @@ -1438,8 +1438,8 @@ SELECT foo.user_id FROM GROUP BY r.user_id ) as foo ORDER BY 1 DESC; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -1456,8 +1456,8 @@ SELECT foo.user_id FROM GROUP BY r.user_id, m.user_id ) as foo ORDER BY 1 LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1471,8 +1471,8 @@ SELECT foo.user_id FROM ) as foo ORDER BY 1 DESC LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -1486,8 +1486,8 @@ SELECT foo.user_id FROM ( SELECT DISTINCT ON(r.user_id) r.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) ) as foo; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1502,8 +1502,8 @@ SELECT foo.user_id FROM SELECT DISTINCT ON(r.user_id, m.user_id) r.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) ) as foo ORDER BY 1 LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1519,8 +1519,8 @@ FROM ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time -----------------+------------+--------------------------------- + distinct_users | event_type | time +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1539,8 +1539,8 @@ ON (events_dist.user_id = users_ref.distinct_users) ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time -----------------+------------+--------------------------------- + distinct_users | event_type | time +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1557,8 +1557,8 @@ FROM ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time -----------------+------------+--------------------------------- + distinct_users | event_type | time +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1573,8 +1573,8 @@ SELECT * FROM ( SELECT DISTINCT users_reference_table.user_id FROM users_reference_table, events_table WHERE users_reference_table.user_id = events_table.value_4 ) as foo; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) SELECT * FROM @@ -1582,8 +1582,8 @@ SELECT * FROM SELECT users_reference_table.user_id FROM users_reference_table, events_table WHERE users_reference_table.user_id = events_table.value_4 GROUP BY 1 ) 
as foo; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) -- similiar to the above examples, this time there is a subquery @@ -1593,8 +1593,8 @@ SELECT * FROM SELECT DISTINCT users_reference_table.user_id FROM users_reference_table, (SELECT user_id, random() FROM events_table) as us_events WHERE users_reference_table.user_id = us_events.user_id ) as foo ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1610,8 +1610,8 @@ SELECT * FROM ) as foo ORDER BY 1 DESC LIMIT 4; - user_id | user_id ----------+--------- + user_id | user_id +--------------------------------------------------------------------- 6 | 6 5 | 5 4 | 4 @@ -1633,19 +1633,19 @@ SELECT * FROM ) as foo ORDER BY 1 DESC LIMIT 4; - user_id | value_4 ----------+--------- - 6 | - 5 | - 4 | - 3 | + user_id | value_4 +--------------------------------------------------------------------- + 6 | + 5 | + 4 | + 3 | (4 rows) -- test the read_intermediate_result() for GROUP BYs BEGIN; SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,200) s'); - broadcast_intermediate_result -------------------------------- + broadcast_intermediate_result +--------------------------------------------------------------------- 200 (1 row) @@ -1663,8 +1663,8 @@ GROUP BY res.val_square) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1680,8 +1680,8 @@ JOIN FROM read_intermediate_result('squares', 'binary') AS res (val int, val_square int)) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1704,8 +1704,8 @@ squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1729,8 +1729,8 @@ GROUP BY res2.val_square) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 4 (2 rows) @@ -1753,8 +1753,8 @@ FROM ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -1778,8 +1778,8 @@ JOIN ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 4 (2 rows) @@ -1798,8 +1798,8 @@ FROM ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 5 6 (2 rows) @@ -1822,8 +1822,8 @@ JOIN ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_subquery_in_where_clause.out b/src/test/regress/expected/multi_subquery_in_where_clause.out index f72b7adf0..a7cac9cbc 100644 --- a/src/test/regress/expected/multi_subquery_in_where_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_clause.out @@ -24,8 +24,8 @@ GROUP BY user_id HAVING count(*) > 2 ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 5 6 @@ -52,8 +52,8 @@ GROUP BY user_id HAVING count(*) > 1 ORDER BY user_id LIMIT 5; - user_id ---------- + user_id 
+--------------------------------------------------------------------- 1 5 6 @@ -94,8 +94,8 @@ GROUP BY ORDER BY 1 DESC LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 4 3 2 @@ -117,8 +117,8 @@ WHERE e1.user_id = e2.user_id ) ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 2 2 @@ -156,8 +156,8 @@ WHERE GROUP BY 1 HAVING count(*) > 2 ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -173,8 +173,8 @@ FROM users_table WHERE user_id =ANY(SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC LIMIT 5; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -200,8 +200,8 @@ GROUP BY user_id ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -273,8 +273,8 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 2 | 2 2 | 2 2 | 4 @@ -363,8 +363,8 @@ WHERE GROUP BY user_id HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 3 (2 rows) @@ -396,8 +396,8 @@ FROM ( GROUP BY user_id ) q ORDER BY 2 DESC, 1; - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -572,8 +572,8 @@ WHERE (SELECT 1) ORDER BY 1 ASC LIMIT 2; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 1 (2 rows) @@ -589,8 +589,8 @@ WHERE (SELECT random()) AND user_id < 0 ORDER BY 1 ASC LIMIT 2; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) -- OFFSET is not supported in the subquey @@ -640,8 +640,8 @@ WHERE user_id ) as f_outer WHERE f_inner.user_id = f_outer.user_id ) ORDER BY 1 LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 (1 row) @@ -652,11 +652,11 @@ FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 3 AND value_1 <= 4) AND value_2 IN (SELECT user_id FROM users_table WHERE value_1 >= 5 AND value_1 <= 6) ORDER BY 1 DESC LIMIT 3; -DEBUG: generating subplan 26_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 5) AND (value_1 OPERATOR(pg_catalog.<=) 6)) -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 1) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 2)))) AND (user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 3) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 4)))) AND (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('26_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer)))) ORDER BY user_id DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 5) AND (value_1 OPERATOR(pg_catalog.<=) 6)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 1) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 2)))) AND (user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 3) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 4)))) AND (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 diff --git a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out index 377d651dc..7a5f8dc2c 100644 --- a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out @@ -1,24 +1,24 @@ -- -- queries to test the subquery pushdown on reference tables -- subqueries in WHERE with IN operator -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 IN - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + value_2 IN + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -26,39 +26,39 @@ LIMIT 3; -- subqueries in WHERE with NOT EXISTS operator, should work since -- reference table in the inner part of the join -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - NOT EXISTS - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + NOT EXISTS + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- (0 rows) --- subqueries in WHERE with NOT EXISTS operator, should not work since +-- subqueries in WHERE with NOT EXISTS operator, should not work since -- there is a correlated subquery in WHERE clause -SELECT +SELECT user_id -FROM +FROM users_reference_table -WHERE - NOT EXISTS - (SELECT - value_2 - FROM - events_table - WHERE +WHERE + NOT EXISTS + (SELECT + value_2 + FROM + events_table + WHERE users_reference_table.user_id = events_table.user_id ) LIMIT 3; @@ -119,8 +119,8 @@ WHERE ) ORDER BY user_id LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 3 4 @@ -142,8 +142,8 @@ WHERE ) ORDER BY user_id LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -168,24 +168,24 @@ LIMIT 3; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- subqueries 
in WHERE with IN operator without equality -SELECT +SELECT users_table.user_id, count(*) -FROM +FROM users_table -WHERE - value_2 IN - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + value_2 IN + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id > events_reference_table.user_id ) GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 26 4 | 23 3 | 17 @@ -208,8 +208,8 @@ WHERE GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -232,8 +232,8 @@ WHERE GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 6 | 10 (1 row) @@ -298,8 +298,8 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 5 | 5 5 | 5 (2 rows) @@ -368,15 +368,15 @@ ORDER BY 1, 2; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- non-partition key equality with reference table - SELECT - user_id, count(*) -FROM - users_table -WHERE - value_3 =ANY(SELECT value_2 FROM users_reference_table WHERE value_1 >= 1 AND value_1 <= 2) + SELECT + user_id, count(*) +FROM + users_table +WHERE + value_3 =ANY(SELECT value_2 FROM users_reference_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -385,26 +385,26 @@ WHERE (5 rows) -- non-partition key comparison with reference table -SELECT +SELECT user_id, count(*) -FROM +FROM events_table as e1 WHERE event_type IN - (SELECT + (SELECT event_type - FROM + FROM events_reference_table as e2 WHERE value_2 = 2 AND value_3 > 3 AND e1.value_2 > e2.value_2 - ) + ) GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 2 | 7 5 | 6 4 | 5 @@ -413,8 +413,8 @@ LIMIT 5; (5 rows) -- subqueries in both WHERE and FROM clauses --- should work since reference table is on the --- inner part of the join +-- should work since reference table is on the +-- inner part of the join SELECT user_id, value_2 FROM users_table WHERE value_1 > 1 AND value_1 < 3 AND value_2 >= 5 @@ -475,8 +475,8 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 5 | 5 5 | 5 (2 rows) @@ -485,32 +485,32 @@ ORDER BY 1, 2; SET client_min_messages TO DEBUG1; -- recursively planning subqueries in WHERE clause due to recurring table in FROM SELECT - count(*) -FROM - users_reference_table -WHERE user_id + count(*) +FROM + users_reference_table +WHERE user_id NOT IN (SELECT users_table.value_2 FROM users_table JOIN users_reference_table as u2 ON users_table.value_2 = u2.value_2); -DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM (public.users_table 
JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) + count +--------------------------------------------------------------------- 10 (1 row) -- recursively planning subqueries in WHERE clause due to recurring table in FROM SELECT count(*) FROM - (SELECT + (SELECT user_id, random() FROM users_reference_table) AS vals WHERE vals.user_id NOT IN (SELECT users_table.value_2 FROM users_table JOIN users_reference_table AS u2 ON users_table.value_2 = u2.value_2); -DEBUG: generating subplan 20_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_reference_table.user_id, random() AS random FROM public.users_reference_table) vals WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_reference_table.user_id, random() AS random FROM public.users_reference_table) vals WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -543,28 +543,28 @@ WHERE user_id IN ORDER BY 1,2,3 LIMIT 5; DEBUG: push down of limit count: 5 - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | (5 rows) SET client_min_messages TO DEFAULT; -- 
not supported since GROUP BY references to an upper level query -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 > - (SELECT - max(value_2) - FROM - events_reference_table - WHERE +WHERE + value_2 > + (SELECT + max(value_2) + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id AND event_type = 2 GROUP BY users_table.user_id @@ -577,17 +577,17 @@ ERROR: cannot push down this subquery DETAIL: Group by list without partition column is currently unsupported when a subquery references a column from another query -- similar query with slightly more complex group by -- though the error message is a bit confusing -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 > - (SELECT - max(value_2) - FROM - events_reference_table - WHERE +WHERE + value_2 > + (SELECT + max(value_2) + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id AND event_type = 2 GROUP BY (users_table.user_id * 2) diff --git a/src/test/regress/expected/multi_subquery_misc.out b/src/test/regress/expected/multi_subquery_misc.out index bae9595dd..3c8abc67d 100644 --- a/src/test/regress/expected/multi_subquery_misc.out +++ b/src/test/regress/expected/multi_subquery_misc.out @@ -32,8 +32,8 @@ FROM ( ) AS shard_union ORDER BY user_lastseen DESC, user_id; EXECUTE prepared_subquery_1; - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) @@ -66,50 +66,50 @@ FROM ( ORDER BY user_lastseen DESC, user_id; -- should be fine with more than five executions EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length 
----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length ----------+---------------------------------+-------------- + user_id | user_lastseen | array_length +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) @@ -121,15 +121,15 @@ FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $4 AND value_1 <= $3) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $5 AND value_1 <= $6) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $1 AND value_1 <= $2) -GROUP BY +GROUP BY user_id ORDER BY user_id DESC LIMIT 5; -- enough times (6+) to actually use prepared statements EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -138,8 +138,8 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -148,8 +148,8 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -158,8 +158,8 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -168,8 +168,8 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -178,8 +178,8 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -204,52 +204,52 @@ BEGIN short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 3 ) temp ON users_table.user_id = temp.user_id - WHERE + WHERE users_table.value_1 < $2; END; $$ LANGUAGE plpgsql; -- enough times (6+) to actually use prepared statements SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test 
+--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 539 (1 row) -- this should also work, but should return 0 given that int = NULL is always returns false SELECT plpgsql_subquery_test(1, NULL); - plpgsql_subquery_test ------------------------ + plpgsql_subquery_test +--------------------------------------------------------------------- 0 (1 row) @@ -267,7 +267,7 @@ CREATE FUNCTION sql_subquery_test(int, int) RETURNS bigint AS $$ short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 3 ) temp ON users_table.user_id = temp.user_id - WHERE + WHERE users_table.value_1 < $2; $$ LANGUAGE SQL; -- should error out @@ -277,7 +277,7 @@ DETAIL: Possibly this is caused by the use of parameters in SQL functions, whic HINT: Consider using PL/pgSQL functions instead. CONTEXT: SQL function "sql_subquery_test" statement 1 -- the joins are actually removed since they are --- not needed by PostgreSQL planner (e.g., target list +-- not needed by PostgreSQL planner (e.g., target list -- doesn't contain anything from there) -- but Citus can still pushdown this query SELECT @@ -302,8 +302,8 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id GROUP BY 1 ORDER BY 2 DESC; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 676 4 | 529 2 | 324 @@ -313,7 +313,7 @@ ORDER BY 2 DESC; (6 rows) -- the joins are actually removed since they are --- not needed by PostgreSQL planner (e.g., target list +-- not needed by PostgreSQL planner (e.g., target list -- doesn't contain anything from there) -- but Citus can still plan this query even though the query -- is not safe to pushdown @@ -339,8 +339,8 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id GROUP BY 1 ORDER BY 2 DESC; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 5 | 676 4 | 529 2 | 324 @@ -374,8 +374,8 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC, 7 DESC, 8 DESC LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 | user_id | user_id ----------+---------------------------------+---------+---------+---------+---------+---------+--------- + user_id | time | value_1 | value_2 | value_3 | value_4 | user_id | user_id +--------------------------------------------------------------------- 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 diff --git a/src/test/regress/expected/multi_subquery_union.out b/src/test/regress/expected/multi_subquery_union.out index 78f69cd2b..2206e5a4a 100644 --- a/src/test/regress/expected/multi_subquery_union.out +++ b/src/test/regress/expected/multi_subquery_union.out @@ -13,8 +13,8 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter ----------+--------- + user_id | counter +--------------------------------------------------------------------- 2 | 5 3 | 5 4 | 5 @@ -31,8 +31,8 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | max ----------+----- + user_id | max +--------------------------------------------------------------------- 5 | 5 1 | 4 (2 rows) @@ -46,8 +46,8 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter 
----------+--------- + user_id | counter +--------------------------------------------------------------------- 2 | 5 3 | 5 4 | 5 @@ -64,8 +64,8 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter ----------+--------- + user_id | counter +--------------------------------------------------------------------- 2 | 5 2 | 5 3 | 5 @@ -82,8 +82,8 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter ----------+--------- + user_id | counter +--------------------------------------------------------------------- 2 | 5 2 | 5 3 | 5 @@ -101,8 +101,8 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 15 3 | 15 4 | 15 @@ -120,8 +120,8 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 32 3 | 32 4 | 23 @@ -139,8 +139,8 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 15 3 | 15 4 | 15 @@ -159,8 +159,8 @@ GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 7 2 | 15 3 | 15 @@ -180,8 +180,8 @@ GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 7 2 | 15 3 | 15 @@ -203,8 +203,8 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum ------ + sum +--------------------------------------------------------------------- 141 94 87 @@ -225,8 +225,8 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum ------ + sum +--------------------------------------------------------------------- 135 87 85 @@ -247,8 +247,8 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum ------ + sum +--------------------------------------------------------------------- 135 87 85 @@ -297,8 +297,8 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -348,8 +348,8 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -405,8 +405,8 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -456,8 +456,8 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -501,8 +501,8 @@ FROM GROUP BY "t1"."user_id") AS t) 
"q" GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -546,8 +546,8 @@ FROM ) AS t) "q" ORDER BY 1 LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 1 1 @@ -592,8 +592,8 @@ FROM GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -609,8 +609,8 @@ FROM UNION ALL (SELECT user_id FROM events_table) ) b; - count -------- + count +--------------------------------------------------------------------- 202 (1 row) @@ -623,8 +623,8 @@ FROM UNION ALL (SELECT user_id FROM events_reference_table) ) b; - count -------- + count +--------------------------------------------------------------------- 202 (1 row) @@ -639,8 +639,8 @@ FROM ) b ORDER BY 1 DESC LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -659,8 +659,8 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 5 @@ -679,8 +679,8 @@ FROM ) b ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | value_3_sum ----------+------------- + user_id | value_3_sum +--------------------------------------------------------------------- 4 | 65 4 | 65 5 | 64 @@ -700,8 +700,8 @@ FROM GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 119 4 | 111 3 | 100 @@ -728,8 +728,8 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -757,8 +757,8 @@ FROM GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - max ------ + max +--------------------------------------------------------------------- 5 5 5 @@ -776,8 +776,8 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1,2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 0 | 31 1 | 76 2 | 99 @@ -801,8 +801,8 @@ FROM ( SELECT 2 * user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum ------ + sum +--------------------------------------------------------------------- 80 76 55 @@ -833,8 +833,8 @@ UNION GROUP BY user_id) ) as ftop ORDER BY 1,2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 20 1 | 62 2 | 50 @@ -891,8 +891,8 @@ UNION ) as ftop ORDER BY 2, 1 LIMIT 10; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 6 | 43 1 | 62 4 | 91 @@ -923,8 +923,8 @@ UNION ) ftop ORDER BY 2, 1 LIMIT 10; - sum | user_id -------+--------- + sum | user_id +--------------------------------------------------------------------- 300 | 1 1200 | 2 1155 | 3 @@ -948,8 +948,8 @@ UNION ) ftop ORDER BY 2, 1 LIMIT 10; - value_2 | user_id ----------+--------- + value_2 | user_id +--------------------------------------------------------------------- 0 | 1 2 | 1 3 | 1 @@ -980,8 +980,8 
@@ UNION ALL ) ftop ORDER BY 2, 1 LIMIT 10; - sum | user_id -------+--------- + sum | user_id +--------------------------------------------------------------------- 300 | 1 300 | 1 1200 | 2 @@ -1003,8 +1003,8 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1,2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 3 | 101 4 | 91 5 | 94 @@ -1052,8 +1052,8 @@ FROM ( GROUP BY user_id)) AS ftop ORDER BY 1,2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 20 1 | 62 2 | 50 @@ -1083,8 +1083,8 @@ FROM UNION ALL (SELECT 2 * user_id FROM events_table) ) b; - count -------- + count +--------------------------------------------------------------------- 202 (1 row) @@ -1107,8 +1107,8 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -1125,8 +1125,8 @@ FROM UNION ALL (SELECT users_table.user_id FROM events_table, users_table WHERE events_table.user_id = users_table.user_id) ) b; - count -------- + count +--------------------------------------------------------------------- 1850 (1 row) @@ -1139,8 +1139,8 @@ FROM UNION ALL (SELECT 1) ) b; - count -------- + count +--------------------------------------------------------------------- 102 (1 row) @@ -1153,8 +1153,8 @@ FROM UNION ALL (SELECT (random() * 100)::int) ) b; - count -------- + count +--------------------------------------------------------------------- 102 (1 row) @@ -1177,8 +1177,8 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 ----------+--------- + user_id | value_3 +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -1229,8 +1229,8 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype --------+---------------- + types | sumofeventtype +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 diff --git a/src/test/regress/expected/multi_subquery_window_functions.out b/src/test/regress/expected/multi_subquery_window_functions.out index b239f06f2..aabfb9068 100644 --- a/src/test/regress/expected/multi_subquery_window_functions.out +++ b/src/test/regress/expected/multi_subquery_window_functions.out @@ -23,8 +23,8 @@ ORDER BY 3 DESC, 1 DESC, 2 DESC LIMIT 10; - user_id | time | rnk ----------+---------------------------------+----- + user_id | time | rnk +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 22 @@ -51,8 +51,8 @@ ORDER BY 3 DESC, 1 DESC, 2 DESC LIMIT 10; - user_id | time | rnk ----------+---------------------------------+----- + user_id | time | rnk +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 22 @@ -79,8 +79,8 @@ ORDER BY 4 DESC, 3 DESC NULLS LAST, 1 DESC, 2 DESC LIMIT 10; - user_id | time | lag_event_type | row_no ----------+---------------------------------+----------------+-------- + user_id | time | lag_event_type | row_no +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 0 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 3 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 4 | 22 @@ -110,8 +110,8 @@ ORDER BY 
2 DESC, 1 DESC, 3 DESC LIMIT 10; - user_id | rnk | avg_val_2 ----------+-----+-------------------- + user_id | rnk | avg_val_2 +--------------------------------------------------------------------- 6 | 2 | 2.0000000000000000 5 | 2 | 2.0909090909090909 4 | 2 | 2.4000000000000000 @@ -140,8 +140,8 @@ ORDER BY 3 DESC NULLS LAST, 1 DESC, 2 DESC LIMIT 10; - min | min | lag_event_type | count ------+---------------------------------+----------------+------- + min | min | lag_event_type | count +--------------------------------------------------------------------- 1 | Thu Nov 23 11:09:38.074595 2017 | 6 | 1 2 | Wed Nov 22 19:00:10.396739 2017 | 5 | 7 1 | Wed Nov 22 18:49:42.327403 2017 | 4 | 21 @@ -166,8 +166,8 @@ SELECT * FROM ) as foo ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | lag | rank ----------+-----+------ + user_id | lag | rank +--------------------------------------------------------------------- 2 | 2 | 109 5 | 5 | 105 3 | 3 | 103 @@ -195,8 +195,8 @@ SELECT * FROM ) as foo ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | lag | rank ----------+-----+------ + user_id | lag | rank +--------------------------------------------------------------------- 2 | 2 | 73 4 | 4 | 70 3 | 3 | 69 @@ -239,8 +239,8 @@ JOIN sub_1.user_id ORDER BY 3 DESC, 4 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | max | max | max ----------+-----+-----+----- + user_id | max | max | max +--------------------------------------------------------------------- 2 | 2 | 73 | 73 4 | 4 | 70 | 70 3 | 3 | 69 | 69 @@ -270,8 +270,8 @@ ORDER BY 3 DESC, 1 DESC,2 DESC LIMIT 10; - avg | max | my_rank ---------------------+--------------------------+--------- + avg | max | my_rank +--------------------------------------------------------------------- 3.5000000000000000 | Wed Nov 22 00:00:00 2017 | 2 (1 row) @@ -296,8 +296,8 @@ ORDER BY 3 DESC, 1 DESC,2 DESC LIMIT 10; - avg | max | my_rank ---------------------+--------------------------+--------- + avg | max | my_rank +--------------------------------------------------------------------- 3.7500000000000000 | Wed Nov 22 00:00:00 2017 | 2 3.3750000000000000 | Thu Nov 23 00:00:00 2017 | 1 (2 rows) @@ -322,8 +322,8 @@ ORDER BY 2 DESC, 1 DESC LIMIT 10; - avg | my_rank ---------------------+--------- + avg | my_rank +--------------------------------------------------------------------- 3.5000000000000000 | 1 (1 row) @@ -346,8 +346,8 @@ ORDER BY 1, 2, 3 DESC LIMIT 10; - user_id | time | sum ----------+--------------------------+----- + user_id | time | sum +--------------------------------------------------------------------- 1 | Wed Nov 22 00:00:00 2017 | 1 1 | Thu Nov 23 00:00:00 2017 | 7 1 | Thu Nov 23 00:00:00 2017 | 6 @@ -374,8 +374,8 @@ ORDER BY 1, 2, 3 LIMIT 20; - user_id | it_name | count ----------+---------+------- + user_id | it_name | count +--------------------------------------------------------------------- 2 | User_1 | 2 3 | User_1 | 6 4 | User_1 | 2 @@ -395,8 +395,8 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 3 | 44 5 | 43 4 | 41 @@ -417,8 +417,8 @@ GROUP BY user_id ORDER BY 2 DESC,1 LIMIT 10; - user_id | max ----------+----- + user_id | max +--------------------------------------------------------------------- 3 | 15 4 | 13 2 | 10 @@ -440,8 +440,8 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank ----------+------ + user_id | rank +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ 
-462,8 +462,8 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank ----------+------ + user_id | rank +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ -496,8 +496,8 @@ FROM GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - max ------- + max +--------------------------------------------------------------------- 5 3.5 3.25 @@ -538,8 +538,8 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -568,8 +568,8 @@ ORDER BY user_id DESC LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 4 3 2 @@ -592,8 +592,8 @@ GROUP BY ORDER BY difference DESC, rank DESC, user_id LIMIT 20; - user_id | rank | difference | distinct_users ----------+------+------------+---------------- + user_id | rank | difference | distinct_users +--------------------------------------------------------------------- 4 | 12 | 306 | 9 5 | 12 | 136 | 8 3 | 1 | 84 | 6 @@ -641,8 +641,8 @@ WHERE ORDER BY abs DESC, user_id LIMIT 10; - user_id | abs ----------+----- + user_id | abs +--------------------------------------------------------------------- 6 | 2 1 | 1 2 | 0 @@ -666,8 +666,8 @@ ORDER BY 1 DESC LIMIT 5; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 6 | 1 5 | 1 4 | 1 @@ -709,8 +709,8 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.user_id, remote_scan.sum -> Sort @@ -721,7 +721,7 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?)))) -> Sort diff --git a/src/test/regress/expected/multi_subtransactions.out b/src/test/regress/expected/multi_subtransactions.out index d71d54f70..aaa8f2ef5 100644 --- a/src/test/regress/expected/multi_subtransactions.out +++ b/src/test/regress/expected/multi_subtransactions.out @@ -3,9 +3,9 @@ CREATE TABLE artists ( name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- add some data @@ -21,8 +21,8 @@ DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- ROLLBACK TO SAVEPOINT @@ -33,8 +33,8 @@ DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; - id | name -----+----------- + id | name +--------------------------------------------------------------------- 5 | Asher Lev (1 row) @@ -48,8 +48,8 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; - id | name -----+------------ + id | name +--------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -63,8 +63,8 @@ DELETE FROM artists WHERE id=5; 
ROLLBACK TO SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; - id | name -----+------------ + id | name +--------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -81,8 +81,8 @@ ROLLBACK TO SAVEPOINT s0; INSERT INTO artists VALUES (6, 'Emily Carr'); COMMIT; SELECT * FROM artists WHERE id=6; - id | name -----+------------ + id | name +--------------------------------------------------------------------- 6 | Emily Carr (1 row) @@ -97,8 +97,8 @@ ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; - id | name -----+------ + id | name +--------------------------------------------------------------------- (0 rows) -- Recover from errors @@ -131,8 +131,8 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (9, 'Mohsen Namjoo'); COMMIT; SELECT * FROM artists WHERE id IN (7, 8, 9) ORDER BY id; - id | name -----+--------------- + id | name +--------------------------------------------------------------------- 8 | Sogand 9 | Mohsen Namjoo (2 rows) @@ -148,8 +148,8 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (10, 'Mahmoud Farshchian'); COMMIT; SELECT * FROM artists WHERE id IN (9, 10) ORDER BY id; - id | name -----+-------------------- + id | name +--------------------------------------------------------------------- 10 | Mahmoud Farshchian (1 row) @@ -164,8 +164,8 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (11, 'Egon Schiele'); COMMIT; SELECT * FROM artists WHERE id IN (10, 11) ORDER BY id; - id | name -----+-------------- + id | name +--------------------------------------------------------------------- 11 | Egon Schiele (1 row) @@ -180,8 +180,8 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (12, 'Marc Chagall'); COMMIT; SELECT * FROM artists WHERE id IN (11, 12) ORDER BY id; - id | name -----+-------------- + id | name +--------------------------------------------------------------------- 12 | Marc Chagall (1 row) @@ -191,9 +191,9 @@ create table t2(a int, b int CHECK(b > 0)); ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000; select create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); - create_distributed_table | create_distributed_table ---------------------------+-------------------------- - | + create_distributed_table | create_distributed_table +--------------------------------------------------------------------- + | (1 row) begin; @@ -207,28 +207,28 @@ with r AS ( update t1 set b = b - 10 returning * ) insert into t2 select * from r; -ERROR: new row for relation "t2_1190005" violates check constraint "t2_b_check" +ERROR: new row for relation "t2_xxxxxxx" violates check constraint "t2_b_check" rollback to savepoint s1; savepoint s2; with r AS ( update t2 set b = b - 10 returning * ) insert into t1 select * from r; -ERROR: new row for relation "t2_1190004" violates check constraint "t2_b_check" +ERROR: new row for relation "t2_xxxxxxx" violates check constraint "t2_b_check" rollback to savepoint s2; savepoint s3; with r AS ( insert into t2 select i, i+1 from generate_series(-10,-5) i returning * ) insert into t1 select * from r; -ERROR: new row for relation "t2_1190004" violates check constraint "t2_b_check" +ERROR: new row for relation "t2_xxxxxxx" violates check constraint "t2_b_check" rollback to savepoint s3; savepoint s4; with r AS ( insert into t1 select i, i+1 from generate_series(-10,-5) i returning * ) insert into t2 select * from r; -ERROR: new row for relation "t2_1190005" violates check constraint "t2_b_check" +ERROR: new row for relation "t2_xxxxxxx" violates check constraint 
"t2_b_check" rollback to savepoint s4; with r AS ( update t2 set b = b + 1 @@ -236,16 +236,16 @@ with r AS ( ) insert into t1 select * from r; commit; select * from t2 order by a, b; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 4 2 | 5 3 | 6 (3 rows) select * from t1 order by a, b; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 3 1 | 4 2 | 4 @@ -265,9 +265,9 @@ CREATE TABLE researchers ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers', 'lab_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Basic rollback and release @@ -279,8 +279,8 @@ ROLLBACK TO s1; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM researchers WHERE id in (7, 8); - id | lab_id | name -----+--------+----------- + id | lab_id | name +--------------------------------------------------------------------- 7 | 4 | Jan Plaza (1 row) @@ -294,8 +294,8 @@ ROLLBACK TO SAVEPOINT s1; INSERT INTO researchers VALUES (12, 10, 'Stephen Kleene'); COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -310,8 +310,8 @@ SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -326,8 +326,8 @@ SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -344,8 +344,8 @@ END $$; NOTICE: caught not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -361,8 +361,8 @@ END $$; NOTICE: caught manual plpgsql_error COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -378,8 +378,8 @@ END $$; ERROR: not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+---------------- + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -395,8 +395,8 @@ EXCEPTION END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name -----+--------+------------------ + id | lab_id | name +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene 32 | 10 | Raymond Smullyan (2 rows) diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 7fe9af2e9..826b7aad1 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -5,9 +5,9 @@ SET citus.next_shard_id 
TO 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify that the citus extension can't be dropped while distributed tables exist @@ -30,9 +30,9 @@ COMMIT; -- recreate testtableddl CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify that the table can be dropped @@ -42,14 +42,14 @@ CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -- create table and do create empty shard test here, too SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -58,18 +58,18 @@ DROP TABLE testtableddl; RESET citus.shard_replication_factor; -- ensure no metadata of distributed tables are remaining SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- + logicalrelid | partmethod | partkey | colocationid | repmodel +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement; - shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- (0 rows) -- check that the extension now can be dropped (and recreated) @@ -77,14 +77,14 @@ DROP EXTENSION citus; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -93,9 +93,9 @@ CREATE TABLE testserialtable(id serial, group_id integer); SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testserialtable', 'group_id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- should not be able to add additional serial columns @@ -122,7 +122,7 @@ DROP TABLE testserialtable; \c - - - :worker_1_port \ds List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 470860e0b..67f29166d 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -29,9 +29,9 @@ SET citus.explain_distributed_queries TO off; -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Create logical shards with shardids 200, 201, and 202 @@ -39,7 +39,7 @@ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, s SELECT pg_class.oid, series.index, 'r', 1, 1000 FROM pg_class, generate_series(200, 202) AS series(index) WHERE pg_class.relname = 'task_assignment_test_table'; --- Create shard placements for shard 200 and 201 +-- Create shard placements for shard xxxxx and 201 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 200, 1, 1, nodename, nodeport FROM pg_dist_shard_placement @@ -52,7 +52,7 @@ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; --- Create shard placements for shard 202 +-- Create shard placements for shard xxxxx INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 202, 1, 1, nodename, nodeport FROM pg_dist_shard_placement @@ -70,11 +70,11 @@ SET client_min_messages TO DEBUG3; SET citus.task_assignment_policy TO 'greedy'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 - QUERY PLAN ----------------------------------------------------------------------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -82,11 +82,11 @@ DEBUG: assigned task 2 to node localhost:57637 EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 - QUERY PLAN ----------------------------------------------------------------------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -96,11 +96,11 @@ DEBUG: assigned task 2 to node localhost:57637 SET citus.task_assignment_policy TO 'first-replica'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 - QUERY PLAN ----------------------------------------------------------------------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -108,11 +108,11 @@ DEBUG: assigned task 3 to node localhost:57638 EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 - QUERY PLAN ----------------------------------------------------------------------- +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node localhost:xxxxx + QUERY PLAN +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -121,9 +121,9 @@ DEBUG: assigned task 3 to node localhost:57638 COMMIT; CREATE TABLE task_assignment_reference_table (test_id integer); SELECT create_reference_table('task_assignment_reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -135,8 +135,8 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN --------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -145,8 +145,8 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN --------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for 
distributed queries are not enabled (2 rows) @@ -156,8 +156,8 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN --------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -166,8 +166,8 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN --------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -191,8 +191,8 @@ INSERT INTO explain_outputs SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table'); -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -210,8 +210,8 @@ INSERT INTO explain_outputs -- given that we're in the same transaction, the count should be 2 -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -223,9 +223,9 @@ TRUNCATE explain_outputs; SET citus.shard_replication_factor TO 2; CREATE TABLE task_assignment_replicated_hash (test_id integer); SELECT create_distributed_table('task_assignment_replicated_hash', 'test_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -237,8 +237,8 @@ INSERT INTO explain_outputs SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash'); -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -256,8 +256,8 @@ INSERT INTO explain_outputs -- given that we're in the same transaction, the count should be 2 -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -267,9 +267,9 @@ TRUNCATE explain_outputs; SET citus.shard_replication_factor TO 1; CREATE TABLE task_assignment_nonreplicated_hash (test_id integer, ref_id integer); SELECT create_distributed_table('task_assignment_nonreplicated_hash', 'test_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- run the query two times to make sure that it hits the correct worker every time @@ -289,8 +289,8 @@ FROM (SELECT * FROM task_assignment_nonreplicated_hash WHERE test_id = 3) AS dis $cmd$, 'task_assignment_nonreplicated_hash'); -- The count should be 1 since the shard exists in only one worker node SELECT 
count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -299,9 +299,9 @@ TRUNCATE explain_outputs; -- only contains intermediate results CREATE TABLE task_assignment_test_table_2 (test_id integer); SELECT create_distributed_table('task_assignment_test_table_2', 'test_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.task_assignment_policy TO 'round-robin'; @@ -318,8 +318,8 @@ $cmd$, 'task_assignment_test_table_2'); -- The count should be 2 since the intermediate results are processed on -- different workers SELECT count(DISTINCT value) FROM explain_outputs; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_task_string_size.out b/src/test/regress/expected/multi_task_string_size.out index 1eec2b95b..7e4a3c188 100644 --- a/src/test/regress/expected/multi_task_string_size.out +++ b/src/test/regress/expected/multi_task_string_size.out @@ -206,15 +206,15 @@ CREATE TABLE wide_table long_column_200 int ); SELECT create_distributed_table('wide_table', 'long_column_001'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET citus.task_executor_type TO 'task-tracker'; SHOW citus.max_task_string_size; - citus.max_task_string_size ----------------------------- + citus.max_task_string_size +--------------------------------------------------------------------- 12288 (1 row) @@ -232,8 +232,8 @@ ERROR: Task failed to execute CONTEXT: PL/pgSQL function raise_failed_execution(text) line 6 at RAISE -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); - long_column_001 | long_column_002 | long_column_003 ------------------+-----------------+----------------- + long_column_001 | long_column_002 | long_column_003 +--------------------------------------------------------------------- (0 rows) RESET client_min_messages; diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index 80dab1b3b..f99b3d7d7 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -97,8 +97,8 @@ ORDER BY a.attrelid, a.attnum; $desc_views$ ); - run_command_on_master_and_workers ------------------------------------ - + run_command_on_master_and_workers +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 117a467e4..850f143a1 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -88,8 +88,8 @@ CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 1500 ALTER SYSTEM SET citus.metadata_sync_interval TO 3000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_tpch_query1.out 
b/src/test/regress/expected/multi_tpch_query1.out index 80e086b81..c5dba76df 100644 --- a/src/test/regress/expected/multi_tpch_query1.out +++ b/src/test/regress/expected/multi_tpch_query1.out @@ -23,8 +23,8 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 diff --git a/src/test/regress/expected/multi_tpch_query10.out b/src/test/regress/expected/multi_tpch_query10.out index 0a71a2914..e5905b64b 100644 --- a/src/test/regress/expected/multi_tpch_query10.out +++ b/src/test/regress/expected/multi_tpch_query10.out @@ -34,18 +34,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. 
even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote diff --git a/src/test/regress/expected/multi_tpch_query12.out b/src/test/regress/expected/multi_tpch_query12.out index 031d0bf29..664609088 100644 --- a/src/test/regress/expected/multi_tpch_query12.out +++ b/src/test/regress/expected/multi_tpch_query12.out @@ -30,8 +30,8 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- + l_shipmode | high_line_count | low_line_count +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) diff --git a/src/test/regress/expected/multi_tpch_query14.out b/src/test/regress/expected/multi_tpch_query14.out index 51e4aa702..14a8358d7 100644 --- a/src/test/regress/expected/multi_tpch_query14.out +++ b/src/test/regress/expected/multi_tpch_query14.out @@ -15,8 +15,8 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue ---------------------- + promo_revenue +--------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query19.out b/src/test/regress/expected/multi_tpch_query19.out index 1a4903de9..c00a5a825 100644 --- a/src/test/regress/expected/multi_tpch_query19.out +++ b/src/test/regress/expected/multi_tpch_query19.out @@ -32,8 +32,8 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue -------------- + revenue +--------------------------------------------------------------------- 144747.0857 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query3.out b/src/test/regress/expected/multi_tpch_query3.out index 775385002..f136b7227 100644 --- a/src/test/regress/expected/multi_tpch_query3.out +++ b/src/test/regress/expected/multi_tpch_query3.out @@ -24,8 +24,8 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- + l_orderkey | revenue | o_orderdate | o_shippriority +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 diff --git a/src/test/regress/expected/multi_tpch_query6.out b/src/test/regress/expected/multi_tpch_query6.out index 
c6a54ec5d..a0da69eda 100644 --- a/src/test/regress/expected/multi_tpch_query6.out +++ b/src/test/regress/expected/multi_tpch_query6.out @@ -11,8 +11,8 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue -------------- + revenue +--------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7.out b/src/test/regress/expected/multi_tpch_query7.out index 24057f946..dfb057d4f 100644 --- a/src/test/regress/expected/multi_tpch_query7.out +++ b/src/test/regress/expected/multi_tpch_query7.out @@ -41,8 +41,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7_nested.out b/src/test/regress/expected/multi_tpch_query7_nested.out index 86479e5a5..73ff0bf73 100644 --- a/src/test/regress/expected/multi_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_tpch_query7_nested.out @@ -20,18 +20,18 @@ FROM orders, customer, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation n1, nation n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -50,8 +50,8 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- + supp_nation | cust_nation | l_year | revenue +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 197a464a9..7c5cef8e4 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -5,8 +5,8 @@ SET citus.next_shard_id TO 1220000; -- properly. SET client_min_messages TO ERROR; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -17,15 +17,15 @@ SET citus.force_max_query_parallelization TO ON; -- Disable auto-recovery for the initial tests ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) -- Ensure pg_dist_transaction is empty SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -57,40 +57,40 @@ INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_commit'), INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_be_forgotten'), (0, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -105,42 +105,42 @@ SET citus.multi_shard_commit_protocol TO '2pc'; -- there are at least 2 entries CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) >= 2 FROM pg_dist_transaction; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) -- create_reference_table should add another 2 recovery records CREATE TABLE test_recovery_ref (x text); SELECT create_reference_table('test_recovery_ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT count(*) >= 4 FROM pg_dist_transaction; - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- t (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -149,28 +149,28 @@ BEGIN; ALTER TABLE test_recovery ADD COLUMN y text; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Committed DDL commands should write 4 transaction recovery records ALTER TABLE test_recovery ADD COLUMN y text; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -179,50 +179,50 @@ BEGIN; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- Committed INSERT..SELECT should write 4 transaction recovery records INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) -- Committed COPY should write 4 transaction records COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -232,9 +232,9 @@ CREATE TABLE test_recovery_single (LIKE test_recovery); -- creating distributed table should write 2 transaction recovery records -- one connection/transaction per node SELECT create_distributed_table('test_recovery_single', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- 
Multi-statement transactions should write 2 transaction recovery records @@ -245,14 +245,14 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -264,14 +264,14 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -280,14 +280,14 @@ SELECT recover_prepared_transactions(); SET citus.force_max_query_parallelization TO OFF; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -295,14 +295,14 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -311,14 +311,14 @@ SELECT recover_prepared_transactions(); SET citus.force_max_query_parallelization TO ON; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) @@ -326,36 +326,36 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- Test whether auto-recovery runs ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_dist_transaction; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf 
+--------------------------------------------------------------------- t (1 row) @@ -363,8 +363,8 @@ DROP TABLE test_recovery_ref; DROP TABLE test_recovery; DROP TABLE test_recovery_single; SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 307bfa70e..867e434c0 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -7,9 +7,9 @@ SET citus.shard_count TO 4; -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -17,8 +17,8 @@ DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410000 1410001 1410002 @@ -33,8 +33,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410000 | 1 | localhost | 57637 1410000 | 1 | localhost | 57638 1410001 | 1 | localhost | 57637 @@ -48,8 +48,8 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner ---------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -57,8 +57,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410000 | table | postgres public | transactional_drop_shards_1410001 | table | postgres public | transactional_drop_shards_1410002 | table | postgres @@ -72,8 +72,8 @@ DROP TABLE transactional_drop_shards; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT @@ -84,52 +84,52 @@ WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner ---------+------+------+------- + 
Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- test master_delete_protocol in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('transactional_drop_shards'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1410004 (1 row) BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command ------------------------------ + master_apply_delete_command +--------------------------------------------------------------------- 1 (1 row) ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410004 (1 row) @@ -141,8 +141,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410004 | 1 | localhost | 57637 1410004 | 1 | localhost | 57638 (2 rows) @@ -151,8 +151,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410004 | table | postgres (1 row) @@ -160,16 +160,16 @@ ORDER BY -- test master_delete_protocol in transaction, then COMMIT BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command ------------------------------ + master_apply_delete_command +--------------------------------------------------------------------- 1 (1 row) COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT @@ -180,23 +180,23 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- (0 rows) -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner 
---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- test DROP table in a transaction after insertion SELECT master_create_empty_shard('transactional_drop_shards'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1410005 (1 row) @@ -206,8 +206,8 @@ DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410005 (1 row) @@ -219,8 +219,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -228,8 +228,8 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner ---------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -237,8 +237,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -247,16 +247,16 @@ ORDER BY BEGIN; INSERT INTO transactional_drop_shards VALUES (1); SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command ------------------------------ + master_apply_delete_command +--------------------------------------------------------------------- 1 (1 row) ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410005 (1 row) @@ -268,8 +268,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -278,8 +278,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -297,8 +297,8 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM 
pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410005 (1 row) @@ -310,8 +310,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -319,8 +319,8 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner ---------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -328,8 +328,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -337,9 +337,9 @@ ORDER BY -- test DROP reference table with failing worker CREATE TABLE transactional_drop_reference(column1 int); SELECT create_reference_table('transactional_drop_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) \set VERBOSITY terse @@ -348,8 +348,8 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410006 (1 row) @@ -361,8 +361,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410006 | 1 | localhost | 57637 1410006 | 1 | localhost | 57638 (2 rows) @@ -370,8 +370,8 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_reference List of relations - Schema | Name | Type | Owner ---------+------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_reference | table | postgres (1 row) @@ -379,8 +379,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_reference* List of relations - Schema | Name | Type | Owner ---------+--------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_reference_1410006 | table | postgres (1 row) @@ -392,8 +392,8 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid 
---------- + shardid +--------------------------------------------------------------------- 1410005 (1 row) @@ -405,8 +405,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -415,8 +415,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -426,9 +426,9 @@ DROP EVENT TRIGGER fail_drop_table; SET citus.shard_count TO 8; CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL); SELECT create_distributed_table('transactional_drop_serial', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK @@ -437,8 +437,8 @@ DROP TABLE transactional_drop_serial; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410007 1410008 1410009 @@ -457,8 +457,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410007 | 1 | localhost | 57637 1410007 | 1 | localhost | 57638 1410008 | 1 | localhost | 57637 @@ -480,8 +480,8 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_serial List of relations - Schema | Name | Type | Owner ---------+---------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_serial | table | postgres (1 row) @@ -489,8 +489,8 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations - Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | transactional_drop_serial_1410007 | table | postgres public | transactional_drop_serial_1410008 | table | postgres public | transactional_drop_serial_1410009 | table | postgres @@ -503,8 +503,8 @@ ORDER BY \ds transactional_drop_serial_column2_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -514,8 +514,8 @@ DROP TABLE transactional_drop_serial; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410007, 1410008, 1410009, 
1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT @@ -526,29 +526,29 @@ WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_serial List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) -- verify shards and sequence are dropped \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \ds transactional_drop_serial_column2_seq List of relations - Schema | Name | Type | Owner ---------+------+------+------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -557,24 +557,24 @@ SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE transactional_drop_mx(column1 int); SELECT create_distributed_table('transactional_drop_mx', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop_mx'::regclass; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- see metadata is propogated to the worker \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410015 1410016 1410017 @@ -589,8 +589,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 @@ -604,8 +604,8 @@ ROLLBACK; -- verify metadata is not deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1410015 1410016 1410017 @@ -620,8 +620,8 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- + shardid | shardstate | nodename | nodeport 
+--------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 @@ -636,8 +636,8 @@ COMMIT; -- verify metadata is deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) SELECT @@ -648,40 +648,40 @@ WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- + shardid | shardstate | nodename | nodeport +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- try using the coordinator as a worker and then dropping the table SELECT 1 FROM master_add_node('localhost', :master_port); -NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:57636 - ?column? ----------- +NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE citus_local (id serial, k int); SELECT create_distributed_table('citus_local', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO citus_local (k) VALUES (2); DROP TABLE citus_local; SELECT master_remove_node('localhost', :master_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) -- clean the workspace DROP TABLE transactional_drop_shards, transactional_drop_reference; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- test DROP TABLE as a non-superuser in a transaction block @@ -690,15 +690,15 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
GRANT ALL ON SCHEMA public TO try_drop_table; SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); - run_command_on_workers ------------------------------------ + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) SELECT run_command_on_workers('GRANT ALL ON SCHEMA public TO try_drop_table'); - run_command_on_workers ---------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) @@ -707,9 +707,9 @@ SELECT run_command_on_workers('GRANT ALL ON SCHEMA public TO try_drop_table'); BEGIN; CREATE TABLE temp_dist_table (x int, y int); SELECT create_distributed_table('temp_dist_table','x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE temp_dist_table; diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index a13b309a7..74778c2ff 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -10,9 +10,9 @@ SET search_path TO multi_truncate; -- CREATE TABLE test_truncate_append(a int); SELECT create_distributed_table('test_truncate_append', 'a', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify no error is thrown when no shards are present @@ -21,35 +21,35 @@ SELECT master_create_empty_shard('test_truncate_append') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_append; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) INSERT INTO test_truncate_append values (1); SELECT count(*) FROM test_truncate_append; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- create some more shards SELECT master_create_empty_shard('test_truncate_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1210001 (1 row) SELECT master_create_empty_shard('test_truncate_append'); - master_create_empty_shard ---------------------------- + master_create_empty_shard +--------------------------------------------------------------------- 1210002 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1210000 1210001 1210002 @@ -58,15 +58,15 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::r TRUNCATE TABLE test_truncate_append; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_append; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify no shard exists anymore SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) -- command can run inside 
transaction @@ -78,9 +78,9 @@ DROP TABLE test_truncate_append; -- CREATE TABLE test_truncate_range(a int); SELECT create_distributed_table('test_truncate_range', 'a', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify no error is thrown when no shards are present @@ -95,8 +95,8 @@ SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_range; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -105,15 +105,15 @@ INSERT INTO test_truncate_range values (1001); INSERT INTO test_truncate_range values (2000); INSERT INTO test_truncate_range values (100); SELECT count(*) FROM test_truncate_range; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1210003 1210004 1210005 @@ -122,15 +122,15 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::re TRUNCATE TABLE test_truncate_range; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_range; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify 3 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1210003 1210004 1210005 @@ -140,8 +140,8 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::re INSERT INTO test_truncate_range VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_range; ROLLBACK; SELECT count(*) FROM test_truncate_range; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -152,16 +152,16 @@ DROP TABLE test_truncate_range; -- CREATE TABLE test_truncate_hash(a int); SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_hash; SELECT count(*) FROM test_truncate_hash; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -182,22 +182,22 @@ ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. 
SELECT count(*) FROM test_truncate_hash; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) TRUNCATE TABLE test_truncate_hash; SELECT master_create_worker_shards('test_truncate_hash', 4, 1); - master_create_worker_shards ------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) INSERT INTO test_truncate_hash values (1); @@ -205,23 +205,23 @@ INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) TRUNCATE TABLE test_truncate_hash; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_hash; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are still presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1210006 1210007 1210008 @@ -232,8 +232,8 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::reg INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -242,31 +242,31 @@ DROP TABLE test_truncate_hash; SET citus.shard_replication_factor TO 1; CREATE TABLE "a b hash" (a int, b int); SELECT create_distributed_table('"a b hash"', 'a', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO "a b hash" values (1, 0); SELECT * from "a b hash"; - a | b ----+--- + a | b +--------------------------------------------------------------------- 1 | 0 (1 row) TRUNCATE TABLE "a b hash"; SELECT * from "a b hash"; - a | b ----+--- + a | b +--------------------------------------------------------------------- (0 rows) DROP TABLE "a b hash"; -- now with append CREATE TABLE "a b append" (a int, b int); SELECT create_distributed_table('"a b append"', 'a', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset @@ -278,8 +278,8 @@ WHERE shardid = :new_shard_id; INSERT INTO "a b append" values (1, 1); INSERT INTO "a b append" values (600, 600); SELECT * FROM "a b append" ORDER BY a; - a | b ------+----- + a | b +--------------------------------------------------------------------- 1 | 1 600 | 600 (2 rows) @@ -287,8 +287,8 @@ SELECT * FROM "a b append" ORDER BY a; TRUNCATE TABLE "a b append"; -- verify all shards are dropped SELECT shardid FROM pg_dist_shard where logicalrelid = '"a b append"'::regclass; - shardid ---------- + shardid +--------------------------------------------------------------------- (0 rows) DROP TABLE "a b 
append"; @@ -297,9 +297,9 @@ CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -308,23 +308,23 @@ TRUNCATE test_local_truncate; COMMIT; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); - master_drop_all_shards ------------------------- + master_drop_all_shards +--------------------------------------------------------------------- 4 (1 row) DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) DROP TABLE test_local_truncate; @@ -333,9 +333,9 @@ CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -344,23 +344,23 @@ TRUNCATE test_local_truncate; ROLLBACK; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); - master_drop_all_shards ------------------------- + master_drop_all_shards +--------------------------------------------------------------------- 4 (1 row) DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is not truncated SELECT * FROM test_local_truncate; - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 2 (1 row) diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 67ee3ad47..af0b2421f 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -16,23 +16,23 @@ SET citus.shard_count TO 5; -- Create test tables CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table', 'col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_table_2 (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table_2', 'col_1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE mx_ref_table (col_1 int, col_2 text); SELECT create_reference_table('mx_ref_table'); - create_reference_table ------------------------- - + 
create_reference_table +--------------------------------------------------------------------- + (1 row) -- Check that the created tables are colocated MX tables @@ -40,16 +40,16 @@ SELECT logicalrelid, repmodel, colocationid FROM pg_dist_partition WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass) ORDER BY logicalrelid; - logicalrelid | repmodel | colocationid ---------------+----------+-------------- + logicalrelid | repmodel | colocationid +--------------------------------------------------------------------- mx_table | s | 150000 mx_table_2 | s | 150000 (2 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) COPY mx_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); @@ -57,8 +57,8 @@ INSERT INTO mx_ref_table VALUES (-37, 'morbi'); INSERT INTO mx_ref_table VALUES (-78, 'sapien'); INSERT INTO mx_ref_table VALUES (-34, 'augue'); SELECT * FROM mx_table ORDER BY col_1; - col_1 | col_2 | col_3 --------+----------+------- + col_1 | col_2 | col_3 +--------------------------------------------------------------------- -37 | 'lorem' | 1 80 | 'dolor' | 3 7344 | 'sit' | 4 @@ -82,8 +82,8 @@ SELECT create_reference_table('mx_table_worker'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -96,15 +96,15 @@ SELECT master_create_worker_shards('mx_table', 5, 1); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -121,10 +121,10 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. -- DDL commands SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers ---------+---------+---------------------------------------------------------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -138,10 +138,10 @@ ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERE ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers ---------+---------+---------------------------------------------------------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -151,8 +151,8 @@ SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -161,8 +161,8 @@ SELECT master_apply_delete_command('DELETE FROM mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -171,8 +171,8 @@ SELECT 1 FROM master_add_inactive_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -180,8 +180,8 @@ SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; \c - - - :master_port DROP INDEX mx_test_uniq_index; SELECT 1 FROM master_add_inactive_node('localhost', 5432); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -190,16 +190,16 @@ SELECT master_remove_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', 5432); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -209,8 +209,8 @@ SELECT mark_tables_colocated('mx_table', ARRAY['mx_table_2']); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT colocationid FROM pg_dist_partition WHERE logicalrelid='mx_table_2'::regclass; - colocationid --------------- + colocationid +--------------------------------------------------------------------- 0 (1 row) @@ -225,17 +225,17 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) -- stop_metadata_sync_to_node \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port @@ -244,29 +244,29 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. \c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata -------------- + hasmetadata +--------------------------------------------------------------------- f (1 row) \c - - - :worker_2_port SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; - worker_drop_distributed_table -------------------------------- - - + worker_drop_distributed_table +--------------------------------------------------------------------- + + (2 rows) DELETE FROM pg_dist_node; @@ -278,8 +278,8 @@ DROP TABLE mx_table; ERROR: operation is not allowed on this node \set VERBOSITY default SELECT count(*) FROM mx_table; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -291,8 +291,8 @@ SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_tabl ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(*) FROM mx_table; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) @@ -312,8 +312,8 @@ SELECT shardid, nodename, nodeport, shardstate FROM pg_dist_shard_placement WHERE shardid = :testshardid ORDER BY nodeport; - shardid | nodename | nodeport | shardstate ----------+-----------+----------+------------ + shardid | nodename | nodeport | shardstate +--------------------------------------------------------------------- 1270000 | localhost | 57637 | 1 1270000 | localhost | 57638 | 3 (2 rows) @@ -331,10 +331,10 @@ DROP SEQUENCE some_sequence; -- Show that dropping the sequence of an MX table with cascade harms the table and shards BEGIN; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers ---------+---------+---------------------------------------------------------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -343,10 +343,10 @@ SET client_min_messages TO 'WARNING'; DROP SEQUENCE mx_table_col_3_seq CASCADE; RESET client_min_messages; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers ---------+---------+----------- - col_1 | integer | - col_2 | text | + Column | Type | Modifiers +--------------------------------------------------------------------- + col_1 | integer | + col_2 | text | col_3 | bigint | not null (3 rows) @@ -356,16 +356,16 @@ ROLLBACK; DROP TABLE mx_table; DROP TABLE mx_table_2; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) \c - - - :worker_1_port DELETE FROM pg_dist_node; SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; - worker_drop_distributed_table -------------------------------- + worker_drop_distributed_table +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_upgrade_reference_table.out b/src/test/regress/expected/multi_upgrade_reference_table.out index 8101d3ec1..010bf690a 100644 --- a/src/test/regress/expected/multi_upgrade_reference_table.out +++ b/src/test/regress/expected/multi_upgrade_reference_table.out @@ -22,9 +22,9 @@ DROP TABLE upgrade_reference_table_local; SET citus.shard_count TO 4; CREATE TABLE upgrade_reference_table_multiple_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_multiple_shard', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_multiple_shard'); @@ -34,9 +34,9 @@ DROP TABLE upgrade_reference_table_multiple_shard; -- test with table which has no shard CREATE TABLE upgrade_reference_table_no_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_no_shard', 'column1', 'append'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT 
upgrade_to_reference_table('upgrade_reference_table_no_shard'); @@ -48,16 +48,16 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_referenced(column1 int PRIMARY KEY); SELECT create_distributed_table('upgrade_reference_table_referenced', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE upgrade_reference_table_referencing(column1 int REFERENCES upgrade_reference_table_referenced(column1)); SELECT create_distributed_table('upgrade_reference_table_referencing', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables @@ -74,16 +74,16 @@ DROP TABLE upgrade_reference_table_referenced; -- test with no healthy placements CREATE TABLE upgrade_reference_table_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_unhealthy', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_unhealthy'::regclass; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_unhealthy'::regclass::oid); SELECT upgrade_to_reference_table('upgrade_reference_table_unhealthy'); -ERROR: could not find any healthy placement for shard 1360006 +ERROR: could not find any healthy placement for shard xxxxx DROP TABLE upgrade_reference_table_unhealthy; -- test with table containing composite type CREATE TYPE upgrade_test_composite_type AS (key1 text, key2 text); @@ -91,16 +91,16 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_composite(column1 int, column2 upgrade_test_composite_type); SELECT create_distributed_table('upgrade_reference_table_composite', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_composite'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_composite'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) DROP TABLE upgrade_reference_table_composite; @@ -108,9 +108,9 @@ DROP TYPE upgrade_test_composite_type; -- test with reference table CREATE TABLE upgrade_reference_table_reference(column1 int); SELECT create_reference_table('upgrade_reference_table_reference'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_reference'); @@ -120,9 +120,9 @@ DROP TABLE upgrade_reference_table_reference; -- test valid cases, append distributed table CREATE TABLE upgrade_reference_table_append(column1 int); SELECT create_distributed_table('upgrade_reference_table_append', 'column1', 'append'); 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) COPY upgrade_reference_table_append FROM STDIN; @@ -133,8 +133,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- a | f | 0 | c (1 row) @@ -144,8 +144,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360009 | f | f (1 row) @@ -155,8 +155,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- (0 rows) SELECT count(*) active_primaries FROM pg_dist_node WHERE isactive AND noderole='primary' \gset @@ -169,15 +169,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360009 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_append'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- situation after upgrade_reference_table @@ -187,8 +187,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -198,8 +198,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360009 | t | t (1 row) @@ -209,8 +209,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -223,8 +223,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360009 | t (1 row) @@ -232,9 +232,9 @@ DROP TABLE upgrade_reference_table_append; -- test valid cases, shard exists at one worker CREATE TABLE upgrade_reference_table_one_worker(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_worker', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_one_worker'::regclass; @@ -245,8 +245,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360001 | c (1 row) @@ -256,8 +256,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360010 | f | f (1 row) @@ -267,8 +267,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360001 | 1 | 1 | 23 | 0 (1 row) @@ -281,15 +281,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360010 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_worker'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- situation after upgrade_reference_table @@ -299,8 +299,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -310,8 +310,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360010 | t | t (1 row) @@ -321,8 +321,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -335,8 +335,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360010 | t (1 row) @@ -345,9 +345,9 @@ DROP TABLE upgrade_reference_table_one_worker; SET citus.shard_replication_factor TO 2; CREATE TABLE upgrade_reference_table_one_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_unhealthy', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 @@ -359,8 +359,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360002 | c (1 row) @@ -370,8 +370,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360011 | f | f (1 row) @@ -381,8 +381,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360002 | 1 | 2 | 23 | 0 (1 row) @@ -396,15 +396,15 @@ WHERE shardid IN AND shardstate = 1 GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360011 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_unhealthy'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- situation after upgrade_reference_table @@ -414,8 +414,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -425,8 +425,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360011 | t | t (1 row) @@ -436,8 +436,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -451,8 +451,8 @@ WHERE shardid IN AND shardstate = 1 GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360011 | t (1 row) @@ -460,9 +460,9 @@ DROP TABLE upgrade_reference_table_one_unhealthy; -- test valid cases, shard exists at both workers and both are healthy CREATE TABLE upgrade_reference_table_both_healthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_both_healthy', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- situation before upgrade_reference_table @@ -472,8 +472,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360003 | c (1 row) @@ -483,8 +483,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360012 | f | f (1 row) @@ -494,8 +494,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360003 | 1 | 2 | 23 | 0 (1 row) @@ -508,15 +508,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) GROUP BY shardid ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1360012 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_both_healthy'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- situation after upgrade_reference_table @@ -526,8 +526,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -537,8 +537,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360012 | t | t (1 row) @@ -548,8 +548,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -562,8 +562,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? +--------------------------------------------------------------------- 1360012 | t (1 row) @@ -572,9 +572,9 @@ DROP TABLE upgrade_reference_table_both_healthy; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_rollback(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_rollback', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_rollback'::regclass; @@ -585,8 +585,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -596,8 +596,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -607,8 +607,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -621,16 +621,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360013 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_rollback'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; @@ -641,8 +641,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -652,8 +652,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -663,8 +663,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -677,8 +677,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360013 | f (1 row) @@ -687,9 +687,9 @@ DROP TABLE upgrade_reference_table_transaction_rollback; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_commit(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_commit', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_commit'::regclass; @@ -700,8 +700,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -711,8 +711,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360014 | f | f (1 row) @@ -722,8 +722,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -736,16 +736,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360014 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_commit'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) COMMIT; @@ -756,8 +756,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -767,8 +767,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360014 | t | t (1 row) @@ -778,8 +778,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -792,8 +792,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360014 | t (1 row) @@ -801,8 +801,8 @@ ORDER BY shardid; \c - - - :worker_2_port \dt upgrade_reference_table_transaction_commit_* List of relations - Schema | Name | Type | Owner ---------+----------------------------------------------------+-------+---------- + Schema | Name | Type | Owner +--------------------------------------------------------------------- public | upgrade_reference_table_transaction_commit_1360014 | table | postgres (1 row) @@ -814,9 +814,9 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- verify that streaming replicated tables cannot be upgraded to reference tables @@ -826,8 +826,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -837,8 +837,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -848,8 +848,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -862,8 +862,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1360015 (1 row) @@ -877,8 +877,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -888,8 +888,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -899,8 +899,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -913,8 +913,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? +--------------------------------------------------------------------- 1360015 | f (1 row) @@ -925,18 +925,18 @@ SET citus.shard_replication_factor TO 2; RESET citus.replication_model; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ------------------------------ - + start_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) -- situation before upgrade_reference_table @@ -946,8 +946,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- h | f | 1360006 | c (1 row) @@ -957,8 +957,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360016 | f | f (1 row) @@ -968,8 +968,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 1360006 | 1 | 2 | 23 | 0 (1 row) @@ -982,16 +982,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid ---------- + shardid +--------------------------------------------------------------------- 1360016 (1 row) SET client_min_messages TO WARNING; SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); - upgrade_to_reference_table ----------------------------- - + upgrade_to_reference_table +--------------------------------------------------------------------- + (1 row) -- situation after upgrade_reference_table @@ -1001,8 +1001,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | 
partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -1012,8 +1012,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1023,8 +1023,8 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -1037,8 +1037,8 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? +--------------------------------------------------------------------- 1360016 | t (1 row) @@ -1050,8 +1050,8 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- + partmethod | partkeyisnull | colocationid | repmodel +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -1061,8 +1061,8 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- + shardid | shardminvalueisnull | shardmaxvalueisnull +--------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1075,17 +1075,17 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? ----------+---------- + shardid | ?column? 
+--------------------------------------------------------------------- 1360016 | t (1 row) \c - - - :master_port DROP TABLE upgrade_reference_table_mx; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ----------------------------- - + stop_metadata_sync_to_node +--------------------------------------------------------------------- + (1 row) RESET client_min_messages; diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index c98fe9649..08308aba0 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -8,9 +8,9 @@ CREATE TABLE upsert_test ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test', 'part_key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- do a regular insert @@ -26,10 +26,10 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 2, third_col = 4; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 2 | 4 - 2 | 2 | + 2 | 2 | (2 rows) -- do a multi-row DO NOTHING insert @@ -41,11 +41,11 @@ ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.other_col WHERE upsert_test.part_key != 1; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 2 | 4 - 2 | 20 | - 3 | 30 | + 2 | 20 | + 3 | 30 | (3 rows) DELETE FROM upsert_test WHERE part_key = 2; @@ -55,8 +55,8 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 3; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 2 | 4 (1 row) @@ -65,8 +65,8 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 30 | 4 (1 row) @@ -78,8 +78,8 @@ INSERT INTO upsert_test (part_key, other_col, third_col) VALUES (1, 1, 100) ON C DO UPDATE SET other_col = EXCLUDED.third_col; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 100 | 4 (1 row) @@ -88,8 +88,8 @@ INSERT INTO upsert_test as ups_test (part_key) VALUES (1) ON CONFLICT (part_key) DO UPDATE SET other_col = ups_test.other_col + 50, third_col = 200; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 150 | 200 (1 row) @@ -99,8 +99,8 @@ 
INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke third_col = upsert_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col) + 670; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 151 | 872 (1 row) @@ -110,8 +110,8 @@ INSERT INTO upsert_test as ups_test (part_key, other_col) VALUES (1, 1) ON CONFL WHERE ups_test.third_col < 1000 + ups_test.other_col; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col -----------+-----------+----------- + part_key | other_col | third_col +--------------------------------------------------------------------- 1 | 5 | 872 (1 row) @@ -119,17 +119,17 @@ SELECT * FROM upsert_test; INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; - part_key | other_col | third_col -----------+-----------+----------- - 2 | 2 | + part_key | other_col | third_col +--------------------------------------------------------------------- + 2 | 2 | (1 row) INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; - part_key | other_col | third_col -----------+-----------+----------- - 2 | 3 | + part_key | other_col | third_col +--------------------------------------------------------------------- + 2 | 3 | (1 row) -- create another table @@ -142,9 +142,9 @@ CREATE TABLE upsert_test_2 ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partiton key @@ -153,7 +153,7 @@ INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ -- this errors out since there is no unique constraint on partition key INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING; ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- create another table CREATE TABLE upsert_test_3 ( @@ -164,15 +164,15 @@ CREATE TABLE upsert_test_3 CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- since there are no unique indexes, error-out INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1; ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- create another table CREATE TABLE upsert_test_4 ( @@ -181,9 +181,9 @@ CREATE TABLE upsert_test_4 ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) -- a single row insert @@ -197,8 +197,8 @@ INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET coun INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; -- now see the results SELECT * FROM upsert_test_4; - part_key | count -----------+------- + part_key | count +--------------------------------------------------------------------- 1 | 6 (1 row) @@ -206,9 +206,9 @@ SELECT * FROM upsert_test_4; SET citus.shard_replication_factor TO 1; CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); SELECT create_distributed_table('dropcol_distributed', 'key', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index ee2c00168..aed692172 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -6,9 +6,9 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; CREATE TABLE sharded_table ( name text, id bigint ); SELECT create_distributed_table('sharded_table', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- COPY out is supported with distributed tables @@ -35,28 +35,28 @@ PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1; PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1; PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1; EXECUTE sharded_query; - name ------- + name +--------------------------------------------------------------------- (0 rows) EXECUTE sharded_insert; EXECUTE sharded_query; - name ------- + name +--------------------------------------------------------------------- adam (1 row) EXECUTE sharded_update; EXECUTE sharded_query; - name ------- + name +--------------------------------------------------------------------- bob (1 row) EXECUTE sharded_delete; EXECUTE sharded_query; - name ------- + name +--------------------------------------------------------------------- (0 rows) -- try to drop shards with where clause @@ -72,23 +72,23 @@ HINT: Use the DELETE command instead. 
-- lock shard metadata: take some share locks and exclusive locks BEGIN; SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]); - lock_shard_metadata ---------------------- - + lock_shard_metadata +--------------------------------------------------------------------- + (1 row) SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]); - lock_shard_metadata ---------------------- - + lock_shard_metadata +--------------------------------------------------------------------- + (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; - locktype | objid | mode | granted -----------+--------+---------------+--------- + locktype | objid | mode | granted +--------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t @@ -102,9 +102,9 @@ SELECT lock_shard_metadata(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_metadata(5, ARRAY[0]); - lock_shard_metadata ---------------------- - + lock_shard_metadata +--------------------------------------------------------------------- + (1 row) -- lock shard metadata: lock nothing @@ -113,23 +113,23 @@ ERROR: no locks specified -- lock shard resources: take some share locks and exclusive locks BEGIN; SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]); - lock_shard_resources ----------------------- - + lock_shard_resources +--------------------------------------------------------------------- + (1 row) SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]); - lock_shard_resources ----------------------- - + lock_shard_resources +--------------------------------------------------------------------- + (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; - locktype | objid | mode | granted -----------+--------+---------------+--------- + locktype | objid | mode | granted +--------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t @@ -143,9 +143,9 @@ SELECT lock_shard_resources(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_resources(5, ARRAY[-1]); - lock_shard_resources ----------------------- - + lock_shard_resources +--------------------------------------------------------------------- + (1 row) -- lock shard metadata: lock nothing @@ -159,24 +159,24 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; CREATE TABLE dustbunnies (id integer, name text, age integer); SELECT create_distributed_table('dustbunnies', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- add some data to the distributed table \copy dustbunnies (id, name) from stdin with csv CREATE TABLE second_dustbunnies(id integer, name text, age integer); SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT master_create_worker_shards('second_dustbunnies', 1, 2); - master_create_worker_shards 
------------------------------ - + master_create_worker_shards +--------------------------------------------------------------------- + (1 row) -- following approach adapted from PostgreSQL's stats.sql file @@ -266,21 +266,21 @@ ANALYZE dustbunnies; -- verify that the VACUUM and ANALYZE ran \c - - - :worker_1_port SELECT wait_for_stats(); - wait_for_stats ----------------- - + wait_for_stats +--------------------------------------------------------------------- + (1 row) REFRESH MATERIALIZED VIEW prevcounts; SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); - pg_stat_get_vacuum_count --------------------------- + pg_stat_get_vacuum_count +--------------------------------------------------------------------- 1 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); - pg_stat_get_analyze_count ---------------------------- + pg_stat_get_analyze_count +--------------------------------------------------------------------- 1 (1 row) @@ -295,27 +295,27 @@ VACUUM ANALYZE dustbunnies; \c - - - :worker_1_port SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class WHERE oid='dustbunnies_990002'::regclass; - table_rewritten ------------------ + table_rewritten +--------------------------------------------------------------------- t (1 row) -- verify the VACUUM ANALYZE incremented both vacuum and analyze counts SELECT wait_for_stats(); - wait_for_stats ----------------- - + wait_for_stats +--------------------------------------------------------------------- + (1 row) SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); - pg_stat_get_vacuum_count --------------------------- + pg_stat_get_vacuum_count +--------------------------------------------------------------------- 2 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); - pg_stat_get_analyze_count ---------------------------- + pg_stat_get_analyze_count +--------------------------------------------------------------------- 2 (1 row) @@ -331,16 +331,16 @@ VACUUM (FREEZE) dustbunnies; \c - - - :worker_1_port SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class WHERE oid='dustbunnies_990002'::regclass; - frozen_performed ------------------- + frozen_performed +--------------------------------------------------------------------- t (1 row) -- check there are no nulls in either column SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; - attname | null_frac ----------+----------- + attname | null_frac +--------------------------------------------------------------------- age | 1 id | 0 name | 0 @@ -354,8 +354,8 @@ ANALYZE dustbunnies (name); \c - - - :worker_1_port SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; - attname | null_frac ----------+----------- + attname | null_frac +--------------------------------------------------------------------- age | 1 id | 0 name | 0.166667 @@ -370,22 +370,22 @@ HINT: Provide a specific table in order to VACUUM distributed tables. 
VACUUM dustbunnies, second_dustbunnies; -- check the current number of vacuum and analyze run on dustbunnies SELECT run_command_on_workers($$SELECT wait_for_stats()$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) (2 rows) @@ -401,36 +401,36 @@ HINT: Set citus.enable_ddl_propagation to true in order to send targeted ANALYZ SET citus.enable_ddl_propagation to DEFAULT; -- should not propagate the vacuum and analyze SELECT run_command_on_workers($$SELECT wait_for_stats()$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) (2 rows) -- test worker_hash SELECT worker_hash(123); - worker_hash -------------- + worker_hash +--------------------------------------------------------------------- -205084363 (1 row) SELECT worker_hash('1997-08-08'::date); - worker_hash -------------- + worker_hash +--------------------------------------------------------------------- -499701663 (1 row) @@ -439,8 +439,8 @@ SELECT worker_hash('(1, 2)'); ERROR: cannot find a hash function for the input type HINT: Cast input to a data type with a hash function. SELECT worker_hash('(1, 2)'::test_composite_type); - worker_hash -------------- + worker_hash +--------------------------------------------------------------------- -1895345704 (1 row) @@ -448,8 +448,8 @@ SELECT citus_truncate_trigger(); ERROR: must be called as trigger -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_utility_statements.out b/src/test/regress/expected/multi_utility_statements.out index 31888483a..1a4daa418 100644 --- a/src/test/regress/expected/multi_utility_statements.out +++ b/src/test/regress/expected/multi_utility_statements.out @@ -5,7 +5,7 @@ -- distributed tables. 
Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. SET citus.next_shard_id TO 1000000; -CREATE TEMP TABLE lineitem_pricing_summary AS +CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT l_returnflag, @@ -30,8 +30,8 @@ CREATE TEMP TABLE lineitem_pricing_summary AS l_linestatus ); SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -39,7 +39,7 @@ SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; (4 rows) -- Test we can handle joins -CREATE TABLE shipping_priority AS +CREATE TABLE shipping_priority AS ( SELECT l_orderkey, @@ -65,8 +65,8 @@ CREATE TABLE shipping_priority AS o_orderdate ); SELECT * FROM shipping_priority; - l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- + l_orderkey | revenue | o_orderdate | o_shippriority +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -138,7 +138,7 @@ COPY 25 COPY nation TO STDOUT; 0 ALGERIA 0 haggle. carefully final deposits detect slyly agai 1 ARGENTINA 1 al foxes promise slyly according to the regular accounts. bold requests alon -2 BRAZIL 1 y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +2 BRAZIL 1 y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 CANADA 1 eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 EGYPT 4 y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 ETHIOPIA 0 ven packages wake quickly. regu @@ -146,7 +146,7 @@ COPY nation TO STDOUT; 7 GERMANY 3 l platelets. regular accounts x-ray: unusual, regular acco 8 INDIA 2 ss excuses cajole slyly across the packages. deposits print aroun 9 INDONESIA 2 slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull -10 IRAN 4 efully alongside of the slyly final dependencies. +10 IRAN 4 efully alongside of the slyly final dependencies. 11 IRAQ 4 nic deposits boost atop the quickly final requests? quickly regula 12 JAPAN 2 ously. final, express gifts cajole a 13 JORDAN 4 ic deposits are blithely about the carefully regular pa @@ -157,100 +157,98 @@ COPY nation TO STDOUT; 18 CHINA 2 c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. 
depos 19 ROMANIA 3 ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account 20 SAUDI ARABIA 4 ts. silent requests haggle. closely express packages sleep across the blithely -21 VIETNAM 2 hely enticingly express accounts. even, final +21 VIETNAM 2 hely enticingly express accounts. even, final 22 RUSSIA 3 requests against the platelets use never according to the quickly regular pint 23 UNITED KINGDOM 3 eans boost carefully special requests. accounts are. carefull 24 UNITED STATES 1 y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be -- ensure individual cols can be copied out, too COPY nation(n_name) TO STDOUT; -ALGERIA -ARGENTINA -BRAZIL -CANADA -EGYPT -ETHIOPIA -FRANCE -GERMANY -INDIA -INDONESIA -IRAN -IRAQ -JAPAN -JORDAN -KENYA -MOROCCO -MOZAMBIQUE -PERU -CHINA -ROMANIA -SAUDI ARABIA -VIETNAM -RUSSIA -UNITED KINGDOM -UNITED STATES +ALGERIA +ARGENTINA +BRAZIL +CANADA +EGYPT +ETHIOPIA +FRANCE +GERMANY +INDIA +INDONESIA +IRAN +IRAQ +JAPAN +JORDAN +KENYA +MOROCCO +MOZAMBIQUE +PERU +CHINA +ROMANIA +SAUDI ARABIA +VIETNAM +RUSSIA +UNITED KINGDOM +UNITED STATES -- Test that we can create on-commit drop tables, along with changing column names BEGIN; -CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS +CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS (SELECT * FROM customer WHERE c_nationkey = 1 ORDER BY c_custkey LIMIT 10); -SELECT customer_key, c_name, c_address +SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; - customer_key | c_name | c_address ---------------+--------------------+----------------------------------------- + customer_key | c_name | c_address +--------------------------------------------------------------------- 3 | Customer#000000003 | MG9kdTD2WBHm - 14 | Customer#000000014 | KXkletMlL2JQEA + 14 | Customer#000000014 | KXkletMlL2JQEA 30 | Customer#000000030 | nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY 59 | Customer#000000059 | zLOCP0wh92OtBihgspOGl4 106 | Customer#000000106 | xGCOEAUjUNG (5 rows) COMMIT; -SELECT customer_key, c_name, c_address +SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; ERROR: relation "customer_few" does not exist -LINE 2: FROM customer_few ORDER BY customer_key LIMIT 5; - ^ -- Test DECLARE CURSOR .. 
WITH HOLD without parameters that calls ReScan on the top-level CustomScan CREATE TABLE cursor_me (x int, y int); SELECT create_distributed_table('cursor_me', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO cursor_me SELECT s/10, s FROM generate_series(1, 100) s; DECLARE holdCursor CURSOR WITH HOLD FOR SELECT * FROM cursor_me WHERE x = 1 ORDER BY y; FETCH NEXT FROM holdCursor; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 10 (1 row) FETCH FORWARD 3 FROM holdCursor; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 11 1 | 12 1 | 13 (3 rows) FETCH LAST FROM holdCursor; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 19 (1 row) FETCH BACKWARD 3 FROM holdCursor; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 18 1 | 17 1 | 16 (3 rows) FETCH FORWARD 3 FROM holdCursor; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 17 1 | 18 1 | 19 @@ -265,7 +263,7 @@ $$ LANGUAGE SQL; SELECT declares_cursor(5); ERROR: Cursors for queries on distributed tables with parameters are currently unsupported CREATE OR REPLACE FUNCTION cursor_plpgsql(p int) -RETURNS SETOF int AS $$ +RETURNS SETOF int AS $$ DECLARE val int; my_cursor CURSOR (a INTEGER) FOR SELECT y FROM cursor_me WHERE x = $1 ORDER BY y; @@ -285,8 +283,8 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT cursor_plpgsql(4); - cursor_plpgsql ----------------- + cursor_plpgsql +--------------------------------------------------------------------- 40 41 42 @@ -305,17 +303,17 @@ DROP TABLE cursor_me; -- Test DECLARE CURSOR statement with SCROLL DECLARE holdCursor SCROLL CURSOR WITH HOLD FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount - FROM lineitem + FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH NEXT FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 1 | 1 | 17.00 | 0.04 (1 row) FETCH FORWARD 5 FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 1 | 2 | 36.00 | 0.09 1 | 3 | 8.00 | 0.10 1 | 4 | 28.00 | 0.09 @@ -324,14 +322,14 @@ FETCH FORWARD 5 FROM holdCursor; (5 rows) FETCH LAST FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 14947 | 2 | 29.00 | 0.04 (1 row) FETCH BACKWARD 5 FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 14947 | 1 | 14.00 | 0.09 14946 | 2 | 37.00 | 0.01 14946 | 1 | 38.00 | 0.00 @@ -346,14 +344,14 @@ DECLARE noHoldCursor SCROLL CURSOR FOR FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH ABSOLUTE 5 FROM noHoldCursor; - l_orderkey | 
l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 1 | 5 | 24.00 | 0.10 (1 row) FETCH BACKWARD noHoldCursor; - l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ + l_orderkey | l_linenumber | l_quantity | l_discount +--------------------------------------------------------------------- 1 | 4 | 28.00 | 0.09 (1 row) diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out index 4671a2d80..82e20b8f3 100644 --- a/src/test/regress/expected/multi_view.out +++ b/src/test/regress/expected/multi_view.out @@ -6,14 +6,14 @@ -- router queries, single row inserts, multi row inserts via insert -- into select, multi row insert via copy commands. SELECT count(*) FROM lineitem_hash_part; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM orders_hash_part; - count -------- + count +--------------------------------------------------------------------- 2985 (1 row) @@ -21,31 +21,31 @@ SELECT count(*) FROM orders_hash_part; CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM'; -- aggregate pushdown SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1; - o_orderpriority | count ------------------+------- + o_orderpriority | count +--------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 (2 rows) SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1; - o_orderpriority | count ------------------+------- + o_orderpriority | count +--------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 (2 rows) -- filters SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fullfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1; - o_orderpriority | all | fullfilled ------------------+-----+------------ + o_orderpriority | all | fullfilled +--------------------------------------------------------------------- 2-HIGH | 593 | 271 1-URGENT | 604 | 280 (2 rows) -- having SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; - o_orderdate | count --------------+------- + o_orderdate | count +--------------------------------------------------------------------- 08-20-1996 | 5 10-10-1994 | 4 05-05-1994 | 4 @@ -55,8 +55,8 @@ SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > -- having with filters SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; - o_orderdate | all | count --------------+-----+------- + o_orderdate | all | count +--------------------------------------------------------------------- 08-20-1996 | 5 | 0 10-10-1994 | 4 | 4 05-05-1994 | 4 | 4 @@ -66,8 +66,8 @@ SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') -- limit SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ; - o_orderkey | o_totalprice -------------+-------------- + o_orderkey | o_totalprice +--------------------------------------------------------------------- 4421 | 401055.62 10209 | 400191.77 11142 | 395039.05 @@ -76,15 +76,15 @@ 
SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc lim (5 rows) SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ; - o_orderkey | o_totalprice -------------+-------------- + o_orderkey | o_totalprice +--------------------------------------------------------------------- 14179 | 384265.43 (1 row) CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey); SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 7 | 7 225 | 7 226 | 7 @@ -95,29 +95,29 @@ SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; -- join between view and table SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count -------- + count +--------------------------------------------------------------------- 1706 (1 row) -- join between views SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count -------- + count +--------------------------------------------------------------------- 700 (1 row) -- count distinct on partition column is supported SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count -------- + count +--------------------------------------------------------------------- 551 (1 row) -- count distinct on non-partition column is supported SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -125,8 +125,8 @@ SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lin SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -134,57 +134,57 @@ SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitem SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); - o_orderkey ------------- + o_orderkey +--------------------------------------------------------------------- 231 (1 row) -- left join support depends on flattening of the query SELECT o_orderkey, l_orderkey FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) ORDER BY o_orderkey LIMIT 1; - o_orderkey | l_orderkey -------------+------------ - 2 | + o_orderkey | l_orderkey +--------------------------------------------------------------------- + 2 | (1 row) -- however, this works SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count -------- + count +--------------------------------------------------------------------- 700 (1 row) -- view on the inner side is supported SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count -------- + count +--------------------------------------------------------------------- 1706 (1 row) -- view on the outer side is 
supported SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count -------- + count +--------------------------------------------------------------------- 700 (1 row) -- left join on router query is supported SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE o_orderkey = 2; - o_orderkey | l_linenumber -------------+-------------- - 2 | + o_orderkey | l_linenumber +--------------------------------------------------------------------- + 2 | (1 row) -- repartition query on view join -- it passes planning, fails at execution stage SET client_min_messages TO DEBUG1; SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey) ORDER BY o_orderkey DESC, o_custkey DESC, o_orderpriority DESC LIMIT 5; -DEBUG: generating subplan 22_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar) -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity 
numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey 
OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5 DEBUG: push down of limit count: 5 - o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+---------------+--------------+-------------+-----------------+-----------------+----------------+-------------------------------------------------------+------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+------------------------------------------- - 14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside + o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment +--------------------------------------------------------------------- + 14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside 14790 | 613 | O | 270163.54 | 08-21-1996 | 2-HIGH | Clerk#000000347 | 0 | p. regular deposits wake. final n | 2629 | 123076 | 613 | 2 | 31.00 | 34071.17 | 0.08 | 0.03 | N | O | 05-24-1998 | 05-26-1998 | 06-10-1998 | COLLECT COD | AIR | ate blithely bold, regular deposits. bold 14758 | 1225 | F | 37812.49 | 10-27-1993 | 2-HIGH | Clerk#000000687 | 0 | ages nag about the furio | 9156 | 176190 | 1225 | 2 | 22.00 | 27856.18 | 0.03 | 0.00 | R | F | 02-08-1994 | 04-01-1994 | 02-24-1994 | DELIVER IN PERSON | AIR | equests dete 14725 | 569 | O | 261801.45 | 06-17-1995 | 2-HIGH | Clerk#000000177 | 0 | ng asymptotes. 
final, ironic accounts cajole after | 14688 | 173017 | 569 | 3 | 10.00 | 10900.10 | 0.02 | 0.08 | N | O | 03-14-1997 | 04-22-1997 | 04-05-1997 | COLLECT COD | AIR | riously even packages sleep a @@ -193,8 +193,8 @@ DEBUG: push down of limit count: 5 RESET client_min_messages; SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); - count -------- + count +--------------------------------------------------------------------- 192 (1 row) @@ -211,8 +211,8 @@ SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; - l_suppkey | count ------------+------- + l_suppkey | count +--------------------------------------------------------------------- 7680 | 4 160 | 3 1042 | 3 @@ -231,8 +231,8 @@ DETAIL: Subqueries without group by clause are not supported yet -- repartition query on view with single table subquery CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; - l_suppkey | count ------------+------- + l_suppkey | count +--------------------------------------------------------------------- 6104 | 8 1868 | 6 5532 | 6 @@ -251,8 +251,8 @@ CREATE VIEW lineitems_by_shipping_method AS SELECT l_shipmode, count(*) as cnt FROM lineitem_hash_part GROUP BY 1; -- following will be supported via recursive planning SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5; - l_shipmode | cnt -------------+------ + l_shipmode | cnt +--------------------------------------------------------------------- AIR | 1706 FOB | 1709 MAIL | 1739 @@ -269,8 +269,8 @@ CREATE VIEW lineitems_by_orderkey AS GROUP BY 1; -- this should work since we're able to push down this query SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 7 | 7 68 | 7 129 | 7 @@ -285,8 +285,8 @@ SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; -- it would also work since it is made router plannable SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 100 | 5 (1 row) @@ -303,8 +303,8 @@ CREATE VIEW recent_users AS GROUP BY user_id HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC; SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -315,15 +315,15 @@ CREATE VIEW recent_events AS SELECT user_id, time FROM events_table WHERE time > '2017-11-23 16:20:33.264457'::timestamp; SELECT count(*) FROM recent_events; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) -- count number of events of recent_users SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id); - count -------- + count +--------------------------------------------------------------------- 50 (1 row) @@ -334,8 +334,8 @@ SELECT ru.user_id, count(*) ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 
DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -348,8 +348,8 @@ SELECT ru.user_id, count(*) ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -364,8 +364,8 @@ SELECT * FROM GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -381,8 +381,8 @@ SELECT * FROM GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 1 | 24 3 | 23 5 | 7 @@ -391,8 +391,8 @@ ORDER BY 2 DESC, 1; -- join between views -- recent users who has an event in recent events SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 3 (2 rows) @@ -403,8 +403,8 @@ SELECT count(*) FROM ( SELECT re.*, ru.user_id AS recent_user FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu WHERE recent_user IS NULL; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -412,37 +412,37 @@ SELECT count(*) FROM ( SELECT count(*) FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id) WHERE ru.user_id IS NULL; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) -- join between view and table -- users who has recent activity and they have an entry with value_1 is less than 3 SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 3 ORDER BY 1,2; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | - 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | - 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | - 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | - 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | - 3 | Thu Nov 23 11:18:53.114408 2017 | 2 | 2 | 0 | - 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | - 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | - 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | - 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | - 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | - 5 | Thu Nov 23 07:47:09.542999 2017 | 1 | 4 | 3 | - 5 | Thu Nov 23 09:05:08.53142 2017 | 2 | 2 | 2 | - 5 | Thu Nov 23 09:17:47.706703 2017 | 2 | 5 | 3 | - 5 | Thu Nov 23 10:15:31.764558 2017 | 2 | 2 | 2 | - 5 | Thu Nov 23 14:29:02.557934 2017 | 2 | 1 | 2 | - 5 | Thu Nov 23 15:55:08.493462 2017 | 0 | 3 | 3 | - 5 | Thu Nov 23 16:28:38.455322 2017 | 2 | 5 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | + 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | Thu Nov 23 
00:15:45.610845 2017 | 1 | 1 | 4 | + 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | + 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | + 3 | Thu Nov 23 11:18:53.114408 2017 | 2 | 2 | 0 | + 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | + 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | + 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | + 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | + 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | + 5 | Thu Nov 23 07:47:09.542999 2017 | 1 | 4 | 3 | + 5 | Thu Nov 23 09:05:08.53142 2017 | 2 | 2 | 2 | + 5 | Thu Nov 23 09:17:47.706703 2017 | 2 | 5 | 3 | + 5 | Thu Nov 23 10:15:31.764558 2017 | 2 | 2 | 2 | + 5 | Thu Nov 23 14:29:02.557934 2017 | 2 | 1 | 2 | + 5 | Thu Nov 23 15:55:08.493462 2017 | 0 | 3 | 3 | + 5 | Thu Nov 23 16:28:38.455322 2017 | 2 | 5 | 4 | (21 rows) -- determine if a recent user has done a given event type or not @@ -451,8 +451,8 @@ SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 6) ORDER BY 2 DESC, 1; - user_id | done_event ----------+------------ + user_id | done_event +--------------------------------------------------------------------- 1 | YES 3 | NO 5 | NO @@ -466,8 +466,8 @@ SELECT * FROM ON(ru.user_id = et.user_id AND et.event_type = 6) ) s1 ORDER BY 2 DESC, 1; - user_id | done_event ----------+------------ + user_id | done_event +--------------------------------------------------------------------- 1 | YES 3 | NO 5 | NO @@ -488,8 +488,8 @@ DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer jo CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 1 and value_1 <3; CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id); SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 3 5 @@ -497,8 +497,8 @@ SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; -- this would be supported when we implement where partition_key in (subquery) support SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5; - user_id | time ----------+--------------------------------- + user_id | time +--------------------------------------------------------------------- 5 | Thu Nov 23 16:11:02.929469 2017 5 | Thu Nov 23 14:40:40.467511 2017 5 | Thu Nov 23 14:28:51.833214 2017 @@ -508,8 +508,8 @@ SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user -- it is supported when it is a router query SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 1); - count -------- + count +--------------------------------------------------------------------- 15 (1 row) @@ -518,8 +518,8 @@ SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM re UNION (SELECT user_id FROM selected_users) ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -536,8 +536,8 @@ SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 (1 row) 
@@ -549,8 +549,8 @@ SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 1 (2 rows) @@ -561,8 +561,8 @@ SELECT count(*) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -573,8 +573,8 @@ SELECT count(*) UNION ALL (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -588,8 +588,8 @@ SELECT count(*) UNION (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -602,8 +602,8 @@ SELECT count(*) UNION ALL (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -611,8 +611,8 @@ SELECT count(*) -- distinct is supported if it is on a partition key CREATE VIEW distinct_user_with_value_1_3 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 3; SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -625,8 +625,8 @@ SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; -- but will be supported via recursive planning CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 3; SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5; - value_1 ---------- + value_1 +--------------------------------------------------------------------- 5 4 3 @@ -638,53 +638,53 @@ SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5; CREATE VIEW cte_view_1 AS WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 3) SELECT * FROM c1 WHERE value_2 < 4 AND EXISTS (SELECT * FROM c1); SELECT * FROM cte_view_1 ORDER BY 1,2,3,4,5 LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | - 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | - 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | + 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | (5 rows) -- this is single shard query and still not supported since it has view + cte -- router planner can't detect it SELECT * FROM cte_view_1 WHERE user_id = 2 ORDER BY 1,2,3,4,5; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+--------------------------------+---------+---------+---------+--------- - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 
+--------------------------------------------------------------------- + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | (1 row) -- if CTE itself prunes down to a single shard than the view is supported (router plannable) CREATE VIEW cte_view_2 AS WITH c1 AS (SELECT * FROM users_table WHERE user_id = 2) SELECT * FROM c1 WHERE value_1 = 3; SELECT * FROM cte_view_2; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | (4 rows) CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2; -- router plannable SELECT user_id FROM router_view GROUP BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 (1 row) -- join a router view SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3; - user_id | time ----------+--------------------------------- + user_id | time +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3; - user_id | time ----------+--------------------------------- + user_id | time +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) @@ -696,8 +696,8 @@ CREATE VIEW recent_10_users AS LIMIT 10; -- this is not supported since it has limit in it and subquery_pushdown is not set SELECT * FROM recent_10_users; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -718,8 +718,8 @@ ERROR: cannot push down this subquery DETAIL: Limit in subquery without limit in the outermost query is unsupported -- now both are supported when there is a limit on the outer most query SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; - user_id | lastseen ----------+--------------------------------- + user_id | lastseen +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -729,26 +729,26 @@ SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; (6 rows) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------------------------------+------------+---------+---------+--------- - 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | - 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | - 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | - 2 | Thu Nov 23 17:26:14.563216 2017 | 1 | 5 | 3 | - 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | - 3 
| Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | - 4 | Thu Nov 23 16:20:33.264457 2017 | 0 | 0 | 3 | - 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | - 2 | Thu Nov 23 15:58:49.273421 2017 | 5 | 1 | 2 | - 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | + user_id | time | event_type | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | + 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | + 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | + 2 | Thu Nov 23 17:26:14.563216 2017 | 1 | 5 | 3 | + 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | + 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | + 4 | Thu Nov 23 16:20:33.264457 2017 | 0 | 0 | 3 | + 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | + 2 | Thu Nov 23 15:58:49.273421 2017 | 5 | 1 | 2 | + 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | (10 rows) RESET citus.subquery_pushdown; VACUUM ANALYZE users_table; -- explain tests EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> HashAggregate @@ -757,7 +757,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: users_table.user_id -> Nested Loop @@ -779,15 +779,15 @@ EXPLAIN (COSTS FALSE) SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 4 AND user_id > 1 ORDER BY user_id; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Unique -> Sort Sort Key: recent_users.user_id @@ -807,13 +807,13 @@ EXPLAIN (COSTS FALSE) SELECT * (23 rows) EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan."time" DESC -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 90_1 + -> Distributed Subplan XXX_1 -> Limit -> Sort Sort Key: remote_scan.lastseen DESC @@ -821,7 +821,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: (max("time")) DESC @@ -831,7 +831,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: et."time" DESC @@ -844,8 +844,8 @@ EXPLAIN (COSTS 
FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN SET citus.subquery_pushdown to ON; EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan."time" DESC @@ -853,7 +853,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: et."time" DESC @@ -887,15 +887,15 @@ CREATE TABLE large (id int, tenant_id int); -- constraint id to be unique for "insert into on conflict" test CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE VIEW small_view AS SELECT * from small where id < 100; @@ -911,8 +911,8 @@ ERROR: cannot modify views over distributed tables -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -925,8 +925,8 @@ INSERT INTO small VALUES(14, 14); -- using views in subqueries within modify statements is still valid UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -939,8 +939,8 @@ INSERT INTO large VALUES(14, 14); -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=27 FROM small_view WHERE small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -954,8 +954,8 @@ INSERT INTO large VALUES(14, 14); -- test on a router executable update statement UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -972,15 +972,15 @@ INSERT INTO small VALUES(99, 99); -- run these tests with RETURNING clause to observe the functionality -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; - id | tenant_id | tenant_id -----+-----------+----------- + id | tenant_id | tenant_id 
+--------------------------------------------------------------------- 36 | 14 | 14 36 | 78 | 99 (2 rows) SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -994,8 +994,8 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- (0 rows) -- we should still have identical rows for next test statements, then insert a new row to large table @@ -1003,8 +1003,8 @@ INSERT INTO large VALUES(14, 14); -- delete statement on large DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1022,8 +1022,8 @@ INSERT INTO large VALUES(14, 14); WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1048,15 +1048,15 @@ CREATE TABLE large_partitioned_p1 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM (10) TO (20); CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE VIEW small_view AS SELECT * from small where id < 100; @@ -1069,8 +1069,8 @@ DELETE FROM small_view; ERROR: cannot modify views over distributed tables UPDATE large_partitioned SET id=27 FROM small_view WHERE small_view.tenant_id=large_partitioned.tenant_id; SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 6 | 5 26 | 32 27 | 2 @@ -1086,8 +1086,8 @@ INSERT INTO large_partitioned VALUES(14, 14); -- test on a router executable update statement UPDATE large_partitioned SET id=28 FROM small_view WHERE small_view.id=large_partitioned.id and small_view.tenant_id=14 and large_partitioned.tenant_id=14; SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 6 | 5 26 | 32 27 | 2 @@ -1103,8 +1103,8 @@ INSERT INTO large_partitioned VALUES(14, 14); -- delete statement on large DELETE FROM large_partitioned WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 6 | 5 26 | 32 29 | 15 @@ -1117,8 +1117,8 @@ INSERT INTO large_partitioned VALUES(14, 14); 
WITH all_small_view_tenant_ids AS (SELECT tenant_id FROM small_view) DELETE FROM large_partitioned WHERE tenant_id in (SELECT * FROM all_small_view_tenant_ids); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 6 | 5 26 | 32 29 | 15 @@ -1134,15 +1134,15 @@ CREATE TABLE large (id int, tenant_id int); -- constraint id to be unique for "insert into on conflict" test CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE VIEW small_view AS SELECT id, tenant_id FROM (SELECT *, id*2 FROM small WHERE id < 100 ORDER BY 1,2 LIMIT 5) as foo; @@ -1151,8 +1151,8 @@ CREATE VIEW small_view AS SELECT id, tenant_id FROM (SELECT *, id*2 FROM small W -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -1165,8 +1165,8 @@ INSERT INTO small VALUES(14, 14); -- using views in subqueries within modify statements is still valid UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -1179,8 +1179,8 @@ INSERT INTO large VALUES(14, 14); -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=27 FROM small_view WHERE small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1194,8 +1194,8 @@ INSERT INTO large VALUES(14, 14); -- test on a router executable update statement UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1212,14 +1212,14 @@ INSERT INTO small VALUES(99, 99); -- run these tests with RETURNING clause to observe the functionality -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; - id | tenant_id | tenant_id -----+-----------+----------- + id | tenant_id | tenant_id +--------------------------------------------------------------------- 36 | 14 | 14 (1 row) SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1233,8 +1233,8 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. 
so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- (0 rows) -- we should still have identical rows for next test statements, then insert a new row to large table @@ -1242,8 +1242,8 @@ INSERT INTO large VALUES(14, 14); -- delete statement on large DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1261,8 +1261,8 @@ INSERT INTO large VALUES(14, 14); WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id -----+----------- + id | tenant_id +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 diff --git a/src/test/regress/expected/multi_working_columns.out b/src/test/regress/expected/multi_working_columns.out index 38c05af49..70a44b4ef 100644 --- a/src/test/regress/expected/multi_working_columns.out +++ b/src/test/regress/expected/multi_working_columns.out @@ -6,8 +6,8 @@ -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; - l_quantity ------------- + l_quantity +--------------------------------------------------------------------- 38.00 13.00 15.00 @@ -33,8 +33,8 @@ SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; SELECT l_quantity, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count LIMIT 20; - l_quantity | count -------------+------- + l_quantity | count +--------------------------------------------------------------------- 1.00 | 1 1.00 | 1 1.00 | 1 @@ -60,8 +60,8 @@ SELECT l_quantity, count(*) as count FROM lineitem SELECT l_quantity, l_shipdate, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count, l_shipdate LIMIT 20; - l_quantity | l_shipdate | count -------------+------------+------- + l_quantity | l_shipdate | count +--------------------------------------------------------------------- 1.00 | 02-07-1992 | 1 1.00 | 02-23-1992 | 1 1.00 | 03-17-1992 | 1 diff --git a/src/test/regress/expected/mx_foreign_key_to_reference_table.out b/src/test/regress/expected/mx_foreign_key_to_reference_table.out index a1d8d51ae..d99500a13 100644 --- a/src/test/regress/expected/mx_foreign_key_to_reference_table.out +++ b/src/test/regress/expected/mx_foreign_key_to_reference_table.out @@ -7,12 +7,12 @@ SET citus.next_placement_id TO 7000000; SET citus.replication_model TO streaming; -- Setup the view so that we can check if the foreign keys are created properly CREATE TYPE foreign_details AS (name text, relid text, refd_relid text); -CREATE VIEW table_fkeys_in_workers AS +CREATE VIEW table_fkeys_in_workers AS SELECT -(json_populate_record(NULL::foreign_details, - json_array_elements_text((run_command_on_workers( $$ +(json_populate_record(NULL::foreign_details, + json_array_elements_text((run_command_on_workers( $$ SELECT - COALESCE(json_agg(row_to_json(d)), '[]'::json) + COALESCE(json_agg(row_to_json(d)), '[]'::json) FROM ( SELECT @@ -20,7 +20,7 @@ SELECT relid::regclass::text, refd_relid::regclass::text FROM - table_fkey_cols + 
table_fkey_cols ) d $$ )).RESULT::json )::json )).* ; -- Check if MX can create foreign keys properly on foreign keys from distributed to reference tables @@ -30,27 +30,27 @@ CREATE TABLE referencing_table(id int, ref_id int); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET search_path TO 'fkey_reference_table'; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1, 2; - name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ + name | relid | refd_relid +--------------------------------------------------------------------- fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table fkey_ref_7000002 | fkey_reference_table.referencing_table_7000002 | fkey_reference_table.referenced_table_7000000 diff --git a/src/test/regress/expected/non_colocated_join_order.out b/src/test/regress/expected/non_colocated_join_order.out index ba28a26c1..e8e95f8be 100644 --- a/src/test/regress/expected/non_colocated_join_order.out +++ b/src/test/regress/expected/non_colocated_join_order.out @@ -4,18 +4,18 @@ -- Tests to check placements of shards must be equal to choose local join logic. CREATE TABLE test_table_1(id int, value_1 int); SELECT master_create_distributed_table('test_table_1', 'id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_table_1 FROM STDIN DELIMITER ',' \copy test_table_1 FROM STDIN DELIMITER ',' CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy test_table_2 FROM STDIN DELIMITER ',' @@ -26,8 +26,8 @@ SET client_min_messages to DEBUG1; -- local join logic will be triggered. 
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ local partition join "test_table_2" ] - count -------- + count +--------------------------------------------------------------------- 6 (1 row) @@ -42,8 +42,8 @@ SET citus.shard_replication_factor to 1; SET citus.enable_repartition_joins to ON; SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ single range partition join "test_table_2" ] - count -------- + count +--------------------------------------------------------------------- 9 (1 row) diff --git a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out index 298093b9d..da9964f0e 100644 --- a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out @@ -23,8 +23,8 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SHOW log_error_verbosity; - log_error_verbosity ---------------------- + log_error_verbosity +--------------------------------------------------------------------- terse (1 row) @@ -36,10 +36,10 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); -DEBUG: generating subplan 1_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -51,11 +51,11 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); -DEBUG: generating subplan 3_1 for subquery SELECT users_table.user_id, random() AS random FROM 
public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 3_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -65,35 +65,35 @@ SELECT true AS valid FROM explain_json($$SELECT FROM users_table WHERE - value_1 + value_1 IN - (SELECT - users_table.user_id - FROM - users_table, events_table - WHERE + (SELECT + users_table.user_id + FROM + users_table, events_table + WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); -DEBUG: generating subplan 6_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer))) + valid +--------------------------------------------------------------------- t (1 row) -- should work fine when used with CTEs SELECT true AS valid FROM explain_json($$ - WITH q1 AS (SELECT user_id FROM users_table) -SELECT count(*) FROM q1, (SELECT - users_table.user_id, random() - FROM - users_table, events_table - WHERE + WITH q1 AS (SELECT user_id FROM users_table) +SELECT count(*) FROM q1, (SELECT + users_table.user_id, random() + FROM + users_table, events_table + WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); -DEBUG: generating subplan 8_1 for CTE q1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 8_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for CTE q1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -101,11 +101,11 @@ DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS cou SELECT true AS valid FROM explain_json($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); -DEBUG: generating subplan 11_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 11_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT 
intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) + valid +--------------------------------------------------------------------- t (1 row) @@ -115,19 +115,19 @@ SELECT event, array_length(events_table, 1) FROM ( SELECT event, array_agg(t.user_id) AS events_table FROM ( - SELECT + SELECT DISTINCT ON(e.event_type::text) e.event_type::text as event, e.time, e.user_id - FROM + FROM users_table AS u, events_table AS e, (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6,7,8)) as bar - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) LIMIT 5 @@ -137,15 +137,15 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); -DEBUG: generating subplan 14_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 14_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 -DEBUG: generating subplan 14_3 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 14_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('14_3'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) -DEBUG: generating subplan 14_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event - valid -------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) +DEBUG: generating subplan XXX_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event + valid +--------------------------------------------------------------------- t (1 row) @@ -158,10 +158,10 @@ FROM (SELECT users_table.user_id, value_1 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.value_1;$$); -DEBUG: generating subplan 20_1 for subquery SELECT users_table.user_id, 
users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.value_1) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.value_1) + valid +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index 15fdc52ac..171781aaa 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -41,10 +41,10 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.value_2 = bar.value_2; $$); -DEBUG: generating subplan 3_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY 
(ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) + valid +--------------------------------------------------------------------- t (1 row) @@ -61,10 +61,10 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT event_type FROM events_table WHERE user_id < 100); $$); -DEBUG: generating subplan 5_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) + valid +--------------------------------------------------------------------- t (1 row) @@ -80,10 +80,10 @@ SELECT true AS valid FROM explain_json_2($$ NOT IN (SELECT user_id FROM events_table WHERE event_type = 2); $$); -DEBUG: generating subplan 7_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) + valid +--------------------------------------------------------------------- t (1 row) @@ -101,10 +101,10 @@ SELECT true AS valid FROM explain_json_2($$ foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 3); $$); -DEBUG: generating subplan 9_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM 
read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) + valid +--------------------------------------------------------------------- t (1 row) @@ -121,10 +121,10 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.user_id IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); -DEBUG: generating subplan 11_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) + valid +--------------------------------------------------------------------- t (1 row) @@ -141,11 +141,11 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = 
bar.user_id AND foo.user_id NOT IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); -DEBUG: generating subplan 13_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) + valid +--------------------------------------------------------------------- t (1 row) @@ -162,11 +162,11 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 4); $$); -DEBUG: generating subplan 16_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 16_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id 
OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) + valid +--------------------------------------------------------------------- t (1 row) @@ -186,13 +186,13 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top, events_table WHERE events_table.user_id = foo_top.user_id; $$); -DEBUG: generating subplan 19_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 19_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.event_type) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 19_3 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 5) -DEBUG: generating subplan 19_4 for subquery SELECT foo.user_id, random() AS random FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) -DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT foo_top.user_id, foo_top.random, events_table.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double 
precision)) foo_top, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) foo_top.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.event_type) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_3 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 5) +DEBUG: generating subplan XXX_4 for subquery SELECT foo.user_id, random() AS random FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo_top.user_id, foo_top.random, events_table.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo_top, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) foo_top.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -220,10 +220,10 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top; $$); -DEBUG: generating subplan 24_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) -DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 
16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top + valid +--------------------------------------------------------------------- t (1 row) @@ -251,10 +251,10 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.user_id ) as foo_top; $$); -DEBUG: generating subplan 26_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY 
(ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top + valid +--------------------------------------------------------------------- t (1 row) @@ -280,11 +280,11 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.value_1 ) as foo_top; $$); -DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 28_2 for subquery SELECT users_table.user_id, 
users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) -DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, 
intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top + valid +--------------------------------------------------------------------- t (1 row) @@ -311,11 +311,11 @@ SELECT true AS valid FROM explain_json_2($$ foo2.user_id = foo5.value_1 ) as foo_top; $$); -DEBUG: generating subplan 31_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 31_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM 
(SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top + valid +--------------------------------------------------------------------- t (1 row) @@ -344,11 +344,11 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.user_id = bar_top.user_id); $$); -DEBUG: generating subplan 34_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 34_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON ((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND 
(events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON ((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) + valid +--------------------------------------------------------------------- t (1 row) @@ -378,10 +378,10 @@ SELECT true AS valid FROM explain_json_2($$ ON (foo_top.value_2 = bar_top.user_id); $$); -DEBUG: generating subplan 37_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) -DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM 
public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) + valid +--------------------------------------------------------------------- t (1 row) @@ -409,11 +409,11 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.value_2 = bar_top.user_id); $$); -DEBUG: generating subplan 39_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16]))) -DEBUG: generating subplan 39_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16]))) +DEBUG: generating subplan XXX_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) + valid +--------------------------------------------------------------------- t (1 row) @@ -430,10 +430,10 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); -DEBUG: generating subplan 42_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE (events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE (events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar + valid +--------------------------------------------------------------------- t (1 row) @@ -449,10 +449,10 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); -DEBUG: generating subplan 44_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) -DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM 
read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar + valid +--------------------------------------------------------------------- t (1 row) @@ -472,10 +472,10 @@ DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS co WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); -DEBUG: generating subplan 46_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) -DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar + valid +--------------------------------------------------------------------- t (1 row) @@ -505,10 +505,10 @@ SELECT true AS valid FROM explain_json_2($$ ) as bar; $$); -DEBUG: generating subplan 48_1 for subquery SELECT value_2 FROM public.events_table -DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE 
((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar + valid +--------------------------------------------------------------------- t (1 row) @@ -526,10 +526,10 @@ WHERE users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); -DEBUG: generating subplan 50_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) -DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + valid +--------------------------------------------------------------------- t (1 row) @@ -542,11 +542,11 @@ SELECT count(*) FROM q1, (SELECT users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); -DEBUG: generating subplan 52_1 for CTE q1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 52_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 52 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('52_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('52_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for CTE q1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -559,10 +559,10 @@ SELECT true AS valid FROM explain_json_2($$ users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); -DEBUG: generating subplan 55_1 for CTE q1: SELECT user_id FROM public.users_table -DEBUG: Plan 55 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('55_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid -------- +DEBUG: generating subplan XXX_1 for CTE q1: SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) + valid +--------------------------------------------------------------------- t (1 row) @@ -570,11 +570,11 @@ DEBUG: Plan 55 query after replacing subqueries and CTEs: SELECT count(*) AS co SELECT true AS valid FROM explain_json_2($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); -DEBUG: generating subplan 57_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND 
(events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 57_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) + valid +--------------------------------------------------------------------- t (1 row) @@ -606,15 +606,15 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); -DEBUG: generating subplan 60_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 60_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('60_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 -DEBUG: generating subplan 60_3 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 60_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('60_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) -DEBUG: generating subplan 60_5 for subquery SELECT t.event, 
array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('60_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event -DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('60_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event - valid -------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) +DEBUG: generating subplan XXX_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event + valid +--------------------------------------------------------------------- t (1 row) @@ -627,8 +627,8 @@ SELECT true AS valid FROM explain_json_2($$ FROM (users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); -DEBUG: generating subplan 66_1 for subquery SELECT value_1, random() AS random FROM public.users_table -DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((public.users_table u1 JOIN public.users_table u2 USING (value_1)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT intermediate_result.value_1, intermediate_result.random FROM 
read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, random double precision)) u3 USING (value_1)) +DEBUG: generating subplan XXX_1 for subquery SELECT value_1, random() AS random FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((public.users_table u1 JOIN public.users_table u2 USING (value_1)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT intermediate_result.value_1, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, random double precision)) u3 USING (value_1)) ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns -- a very similar query to the above -- however, this time we users a subquery instead of join alias, and it works @@ -639,10 +639,10 @@ SELECT true AS valid FROM explain_json_2($$ FROM (SELECT * FROM users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); -DEBUG: generating subplan 68_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) -DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, 
value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) + valid +--------------------------------------------------------------------- t (1 row) @@ -658,10 +658,10 @@ SELECT true AS valid FROM explain_json_2($$ events_table using (value_2); $$); -DEBUG: generating subplan 70_1 for subquery SELECT value_2, random() AS random FROM public.users_table -DEBUG: Plan 70 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('70_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) + valid +--------------------------------------------------------------------- t (1 row) @@ -676,10 +676,10 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_2, random() FROM users_table) as u2 USING(value_2); $$); -DEBUG: generating subplan 72_1 for subquery SELECT value_2, random() AS random FROM public.users_table -DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) + valid +--------------------------------------------------------------------- t (1 row) @@ -696,8 +696,8 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_2, random() FROM users_table) as u2 USING(value_2); $$); -DEBUG: generating subplan 74_1 for subquery SELECT value_2, random() AS random FROM public.users_table -DEBUG: Plan 74 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 RIGHT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('74_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) +DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 
RIGHT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) ERROR: cannot pushdown the subquery -- set operations may produce not very efficient plans -- although we could have picked a as our anchor subquery, @@ -715,13 +715,13 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_1 FROM users_table) as foo ON (a.user_id = foo.value_1) ); $$); -DEBUG: generating subplan 77_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 77_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: generating subplan 76_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table -DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT a.user_id, foo.value_1 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN (SELECT users_table.value_1 FROM public.users_table) foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a.user_id, foo.value_1 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN (SELECT users_table.value_1 FROM public.users_table) foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) + valid +--------------------------------------------------------------------- t (1 row) @@ -739,13 +739,13 @@ SELECT true AS valid FROM explain_json_2($$ users_table as foo ON (a.user_id = foo.value_1) ); $$); -DEBUG: generating subplan 81_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 81_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('81_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: generating subplan 80_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table -DEBUG: Plan 80 query after replacing 
subqueries and CTEs: SELECT a.user_id, foo.user_id, foo."time", foo.value_1, foo.value_2, foo.value_3, foo.value_4 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('80_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN public.users_table foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a.user_id, foo.user_id, foo."time", foo.value_1, foo.value_2, foo.value_3, foo.value_4 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN public.users_table foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) + valid +--------------------------------------------------------------------- t (1 row) @@ -772,10 +772,10 @@ SELECT true AS valid FROM explain_json_2($$ ON(foo.user_id = bar.value_1) ); $$); -DEBUG: generating subplan 84_1 for subquery SELECT value_1 FROM public.users_table -DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT value_1 FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) + valid +--------------------------------------------------------------------- t (1 row) @@ -811,15 +811,15 @@ SELECT true AS valid FROM explain_json_2($$ WHERE non_colocated_subquery.value_2 != non_colocated_subquery_2.cnt $$); -DEBUG: generating subplan 86_1 for CTE non_colocated_subquery: SELECT foo.value_2 FROM (SELECT 
users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) -DEBUG: generating subplan 87_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('87_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) -DEBUG: generating subplan 86_2 for CTE non_colocated_subquery_2: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT events_table_1.event_type FROM public.events_table events_table_1 WHERE (events_table_1.user_id OPERATOR(pg_catalog.<) 4))) -DEBUG: generating subplan 89_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) -DEBUG: Plan 89 query after replacing subqueries and CTEs: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('89_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) -DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT non_colocated_subquery.value_2, non_colocated_subquery_2.cnt FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) non_colocated_subquery, (SELECT intermediate_result.cnt FROM read_intermediate_result('86_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) non_colocated_subquery_2 WHERE (non_colocated_subquery.value_2 OPERATOR(pg_catalog.<>) non_colocated_subquery_2.cnt) - valid -------- +DEBUG: generating subplan XXX_1 for CTE non_colocated_subquery: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2 FROM 
(SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) +DEBUG: generating subplan XXX_2 for CTE non_colocated_subquery_2: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT events_table_1.event_type FROM public.events_table events_table_1 WHERE (events_table_1.user_id OPERATOR(pg_catalog.<) 4))) +DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT non_colocated_subquery.value_2, non_colocated_subquery_2.cnt FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) non_colocated_subquery, (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) non_colocated_subquery_2 WHERE (non_colocated_subquery.value_2 OPERATOR(pg_catalog.<>) non_colocated_subquery_2.cnt) + valid +--------------------------------------------------------------------- t (1 row) @@ -836,11 +836,11 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.value_2 = baz.value_2 $$); -DEBUG: generating subplan 91_1 for subquery SELECT users_table_local.value_2 FROM non_colocated_subquery.users_table_local, non_colocated_subquery.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) -DEBUG: generating subplan 91_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) -DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table_local.value_2 FROM non_colocated_subquery.users_table_local, non_colocated_subquery.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND 
(events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) + valid +--------------------------------------------------------------------- t (1 row) @@ -871,12 +871,12 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.user_id IN (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2)) $$); -DEBUG: generating subplan 93_1 for subquery SELECT value_1, value_2 FROM public.users_table -DEBUG: generating subplan 93_2 for subquery SELECT value_1 FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.<) 1) -DEBUG: generating subplan 93_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) -DEBUG: Plan 93 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('93_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, value_2 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) WHERE ((bar.value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('93_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))) AND (bar.value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('93_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))))) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT value_1, value_2 FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT value_1 FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.<) 1) +DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 
count(*) AS count FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, value_2 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) WHERE ((bar.value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))) AND (bar.value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))))) + valid +--------------------------------------------------------------------- t (1 row) @@ -893,10 +893,10 @@ SELECT true AS valid FROM explain_json_2($$ users_table_ref.user_id = foo.user_id AND foo.user_id = bar.value_2; $$); -DEBUG: generating subplan 97_1 for subquery SELECT user_id, value_2 FROM public.events_table -DEBUG: Plan 97 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('97_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) - valid -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) + valid +--------------------------------------------------------------------- t (1 row) @@ -917,11 +917,11 @@ JOIN LATERAL LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 99_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table OFFSET 0 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table OFFSET 0 DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for the subquery since it contains references to 
outer queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries -DEBUG: Plan 99 query after replacing subqueries and CTEs: SELECT users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('99_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true)) DEBUG: Router planner cannot handle multi-shard select queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries @@ -976,20 +976,20 @@ JOIN LATERAL LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: 
generating subplan 102_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 102_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 102_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 102_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision) +DEBUG: generating subplan XXX_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 102_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2) +DEBUG: generating subplan XXX_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 102_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, 
intermediate_result.value_4 FROM read_intermediate_result('102_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) -DEBUG: Plan 102 query after replacing subqueries and CTEs: SELECT users_table_limited.user_id, users_table_limited."time", users_table_limited.value_1, users_table_limited.value_2, users_table_limited.value_3, users_table_limited.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT users_table_union.user_id, users_table_union."time", users_table_union.value_1, users_table_union.value_2, users_table_union.value_3, users_table_union.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table_union) users_table_limited JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_6'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table_limited.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true)) +DEBUG: generating subplan XXX_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id 
integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT users_table_limited.user_id, users_table_limited."time", users_table_limited.value_1, users_table_limited.value_2, users_table_limited.value_3, users_table_limited.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT users_table_union.user_id, users_table_union."time", users_table_union.value_1, users_table_union.value_2, users_table_union.value_3, users_table_union.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table_union) users_table_limited JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table_limited.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true)) DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery -- similar to the above, but this time there are multiple @@ -1023,19 +1023,19 @@ SELECT count(*) FROM events_table WHERE user_id NOT IN ); DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 109_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 109_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 109_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, 
intermediate_result.value_4 FROM read_intermediate_result('109_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 109_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision) +DEBUG: generating subplan XXX_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 109_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2) +DEBUG: generating subplan XXX_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 109_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) +DEBUG: generating subplan XXX_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM 
read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery -- make sure that non-colocated subquery joins work fine in @@ -1047,15 +1047,15 @@ CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); -- modifications on the partitons are only allowed with rep=1 SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('table2','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('table1','tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- all of the above queries are non-colocated subquery joins @@ -1063,15 +1063,15 @@ SELECT create_distributed_table('table1','tenant_id'); UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id; DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 117_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100) -DEBUG: Plan 117 query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('117_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2.id) +DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2.id) DEBUG: Creating router plan DEBUG: Plan is router executable UPDATE table2_p1 SET id=20 FROM table1_view WHERE table1_view.id=table2_p1.id; DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 119_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100) -DEBUG: Plan 119 query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2_p1 SET id = 20 FROM (SELECT intermediate_result.id, 
intermediate_result.tenant_id FROM read_intermediate_result('119_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2_p1.id) +DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2_p1 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2_p1.id) DEBUG: Creating router plan DEBUG: Plan is router executable RESET client_min_messages; diff --git a/src/test/regress/expected/partitioned_intermediate_results.out b/src/test/regress/expected/partitioned_intermediate_results.out index 560cba703..12bfb4a9a 100644 --- a/src/test/regress/expected/partitioned_intermediate_results.out +++ b/src/test/regress/expected/partitioned_intermediate_results.out @@ -7,8 +7,8 @@ SELECT * FROM worker_partition_query_result('squares_hash', 'SELECT i, i * i FROM generate_series(1, 10) i', 0, 'hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); - partition_index | rows_written | bytes_written ------------------+--------------+--------------- + partition_index | rows_written | bytes_written +--------------------------------------------------------------------- 0 | 4 | 21 1 | 3 | 14 2 | 1 | 5 @@ -18,8 +18,8 @@ SELECT * FROM worker_partition_query_result('squares_hash', SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_0', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 --------------+----+----- + hashint4 | x | x2 +--------------------------------------------------------------------- -1905060026 | 1 | 1 -1330264708 | 5 | 25 -2047600124 | 8 | 64 @@ -29,8 +29,8 @@ ORDER BY x; SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_1', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 --------------+---+---- + hashint4 | x | x2 +--------------------------------------------------------------------- -28094569 | 3 | 9 -1011077333 | 4 | 16 -978793473 | 7 | 49 @@ -39,16 +39,16 @@ ORDER BY x; SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_2', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 ------------+---+---- + hashint4 | x | x2 +--------------------------------------------------------------------- 566031088 | 6 | 36 (1 row) SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_3', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 -------------+---+---- + hashint4 | x | x2 +--------------------------------------------------------------------- 1134484726 | 2 | 4 1672378334 | 9 | 81 (2 rows) @@ -63,8 +63,8 @@ SELECT * FROM worker_partition_query_result('squares_range', '{0,21,41,61}'::text[], '{20,40,60,100}'::text[], true /* binary format */); - partition_index | rows_written | bytes_written ------------------+--------------+--------------- + partition_index | rows_written | bytes_written +--------------------------------------------------------------------- 0 | 4 | 93 1 | 2 | 57 2 | 1 | 39 @@ -74,8 +74,8 @@ SELECT * FROM worker_partition_query_result('squares_range', SELECT x, x2 FROM read_intermediate_result('squares_range_0', 
'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -85,8 +85,8 @@ ORDER BY x; SELECT x, x2 FROM read_intermediate_result('squares_range_1', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 5 | 25 6 | 36 (2 rows) @@ -94,16 +94,16 @@ ORDER BY x; SELECT x, x2 FROM read_intermediate_result('squares_range_2', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 ----+---- + x | x2 +--------------------------------------------------------------------- 7 | 49 (1 row) SELECT x, x2 FROM read_intermediate_result('squares_range_3', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 -----+----- + x | x2 +--------------------------------------------------------------------- 8 | 64 9 | 81 10 | 100 @@ -116,8 +116,8 @@ SELECT * FROM worker_partition_query_result('doubles_hash', 'SELECT i, i * 2 FROM generate_series(1, 1000000) i', 0, 'hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); - partition_index | rows_written | bytes_written ------------------+--------------+--------------- + partition_index | rows_written | bytes_written +--------------------------------------------------------------------- 0 | 250199 | 3586179 1 | 249872 | 3581280 2 | 250278 | 3587487 @@ -126,10 +126,10 @@ SELECT * FROM worker_partition_query_result('doubles_hash', SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_hash_0', 'doubles_hash_1', - 'doubles_hash_2', + 'doubles_hash_2', 'doubles_hash_3'], 'text') AS res (x int, x2 int); - count ---------- + count +--------------------------------------------------------------------- 1000000 (1 row) @@ -140,8 +140,8 @@ SELECT * FROM worker_partition_query_result('doubles_range', 'SELECT i, i * 2 FROM generate_series(1, 1000000) i', 0, 'range', '{0,250001,500001,750001}'::text[], '{250000,500000,750000,1000000}'::text[], true); - partition_index | rows_written | bytes_written ------------------+--------------+--------------- + partition_index | rows_written | bytes_written +--------------------------------------------------------------------- 0 | 250000 | 4500021 1 | 250000 | 4500021 2 | 250000 | 4500021 @@ -150,10 +150,10 @@ SELECT * FROM worker_partition_query_result('doubles_range', SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_range_0', 'doubles_range_1', - 'doubles_range_2', + 'doubles_range_2', 'doubles_range_3'], 'binary') AS res (x int, x2 int); - count ---------- + count +--------------------------------------------------------------------- 1000000 (1 row) @@ -176,8 +176,6 @@ SELECT worker_partition_query_result('squares_range', '{20,40,60,100}'::text[], true); ERROR: syntax error at or near "SELECxT" -LINE 1: SELECT worker_partition_query_result('squares_range', - ^ ROLLBACK TO SAVEPOINT s1; -- invalid result prefix SELECT worker_partition_query_result('squares_range/a/', @@ -364,9 +362,9 @@ $$ LANGUAGE plpgsql; SET citus.shard_count TO 32; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -378,9 +376,9 @@ DROP TABLE t; SET citus.shard_count TO 1; CREATE TABLE t(a int, b int); SELECT 
create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -392,9 +390,9 @@ DROP TABLE t; SET citus.shard_count TO 17; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -406,9 +404,9 @@ DROP TABLE t; SET citus.shard_count TO 8; CREATE TABLE t(a DATE, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL test_partition_query_results('t', 'SELECT (''1985-05-18''::date + (x::text || '' days'')::interval)::date, x * x FROM generate_series(1, 100) x'); @@ -420,9 +418,9 @@ DROP TABLE t; SET citus.shard_count TO 8; CREATE TABLE t(a int4range, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL test_partition_query_results('t', 'SELECT int4range(x,2*x+10), x * x FROM generate_series(1, 100) x'); @@ -433,9 +431,9 @@ DROP TABLE t; -- range partitioning, int partition column CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,76}', @@ -448,9 +446,9 @@ DROP TABLE t; -- not covering ranges, should ERROR CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,100}', @@ -461,9 +459,9 @@ DROP TABLE t; -- overlapping ranges, we allow this in range partitioned distributed tables, should be fine CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,76}', @@ -478,9 +476,9 @@ CREATE TYPE composite_key_type AS (f1 int, f2 text); SET citus.shard_count TO 8; CREATE TABLE t(key composite_key_type, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL create_range_partitioned_shards('t', '{"(0,a)","(25,a)","(50,a)","(75,a)"}', @@ -494,9 +492,9 @@ DROP TYPE composite_key_type; -- unsorted ranges CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CALL 
create_range_partitioned_shards('t', '{50,25,76,0}', diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 8e5350b42..001910977 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -35,9 +35,9 @@ insert into gen1 (id, val1) values (1,4),(3,6),(5,2),(7,2); insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2); select create_distributed_table('gen1', 'id'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) select create_distributed_table('gen2', 'val2'); @@ -46,8 +46,8 @@ DETAIL: Distribution column must not use GENERATED ALWAYS AS (...) STORED. insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2); insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2); select * from gen1 order by 1,2,3; - id | val1 | val2 -----+------+------ + id | val1 | val2 +--------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 3 | 6 | 8 @@ -59,8 +59,8 @@ select * from gen1 order by 1,2,3; (8 rows) select * from gen2 order by 1,2,3; - id | val1 | val2 -----+------+------ + id | val1 | val2 +--------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 3 | 6 | 8 @@ -79,9 +79,9 @@ vacuum (index_cleanup 1) gen1; -- COPY FROM create table cptest (id int, val int); select create_distributed_table('cptest', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) copy cptest from STDIN with csv where val < 4; @@ -95,21 +95,19 @@ ERROR: Citus does not support COPY FROM with WHERE invalid command \. 
select sum(id), sum(val) from cptest; ERROR: syntax error at or near "1" -LINE 1: 1,6 - ^ -- CTE materialized/not materialized CREATE TABLE single_hash_repartition_first (id int, sum int, avg float); CREATE TABLE single_hash_repartition_second (id int primary key, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO single_hash_repartition_first @@ -131,8 +129,8 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -150,8 +148,8 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -170,10 +168,10 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan ------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) - -> Distributed Subplan 7_1 + -> Distributed Subplan XXX_1 -> Custom Scan (Citus Adaptive) Task Count: 4 (4 rows) @@ -193,8 +191,8 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -214,11 +212,11 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); - coordinator_plan ------------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 10_1 + -> Distributed Subplan XXX_1 -> Custom Scan (Citus Adaptive) Task Count: 4 (5 rows) @@ -235,11 +233,11 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); - coordinator_plan ------------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - -> Distributed Subplan 12_1 + -> Distributed Subplan XXX_1 -> Custom Scan (Citus Adaptive) Task Count: 4 (5 rows) @@ -270,23 +268,23 @@ ERROR: insert or update on table "collection_users" violates foreign key constr DETAIL: Key (key, collection_id)=(1, 1000) is not present in 
table "collections_list". SELECT create_distributed_table('collections_list', 'key'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('collection_users', 'key'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- should still fail because of fkey INSERT INTO collection_users VALUES (1, 1000, 1); ERROR: insert or update on table "collection_users_60028" violates foreign key constraint "collection_users_fkey_60028" DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list_60016". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- whereas new record with partition should go through INSERT INTO collections_list VALUES (2, 1, '1.2'); INSERT INTO collection_users VALUES (5, 1, 2); @@ -295,9 +293,9 @@ CREATE TABLE test (x int, y int); INSERT INTO test (x,y) SELECT i,i*3 from generate_series(1, 100) i; SELECT create_distributed_table('test', 'x'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- single shard queries with CHAIN @@ -305,8 +303,8 @@ BEGIN; UPDATE test SET y = 15 WHERE x = 1; COMMIT AND CHAIN; SELECT * FROM test WHERE x = 1; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 15 (1 row) @@ -315,8 +313,8 @@ BEGIN; UPDATE test SET y = 20 WHERE x = 1; ROLLBACK AND CHAIN; SELECT * FROM test WHERE x = 1; - x | y ----+---- + x | y +--------------------------------------------------------------------- 1 | 15 (1 row) @@ -326,8 +324,8 @@ BEGIN; UPDATE test SET y = 25; COMMIT AND CHAIN; SELECT DISTINCT y FROM test; - y ----- + y +--------------------------------------------------------------------- 25 (1 row) @@ -336,8 +334,8 @@ BEGIN; UPDATE test SET y = 30; ROLLBACK AND CHAIN; SELECT DISTINCT y FROM test; - y ----- + y +--------------------------------------------------------------------- 25 (1 row) @@ -349,8 +347,8 @@ UPDATE test SET y = 35; ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; - y ----- + y +--------------------------------------------------------------------- 25 (1 row) @@ -360,8 +358,8 @@ UPDATE test SET y = 40; ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; - y ----- + y +--------------------------------------------------------------------- 25 (1 row) @@ -382,9 +380,9 @@ select create_distributed_table('col_test', 'val'); ERROR: Hash distributed partition columns may not use a non deterministic collation select create_distributed_table('col_test', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) insert into col_test values @@ -392,8 +390,8 @@ insert into col_test values select count(*) from col_test where val = 'asdf'; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index cd23afe6e..64166ac65 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -6,36 +6,36 @@ SET search_path TO "extension'test"; CREATE EXTENSION seg; -- make sure that both the schema and the extension is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE test_table (key int, value seg); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- make sure that the table is also distributed now SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg); -- verify that the type that depends on the extension is also marked as distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test')); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -46,35 +46,35 @@ BEGIN; -- this should not succeed as we do not distribute extension commands within transaction blocks CREATE TABLE dist_table (key int, value public.issn); SELECT create_distributed_table('dist_table', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- we can even run queries (sequentially) over the distributed table SELECT * FROM dist_table; - key | value ------+------- + key | value +--------------------------------------------------------------------- (0 rows) INSERT INTO dist_table VALUES (1, public.issn('1436-4522')); INSERT INTO dist_table SELECT * FROM dist_table RETURNING *; - key | value ------+----------- + key | value +--------------------------------------------------------------------- 1 | 1436-4522 (1 row) COMMIT; -- make sure that the extension is distributed even if we run create extension in a transaction block SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers 
------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) @@ -82,9 +82,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname CREATE TABLE ref_table (a public.issn); -- now, create a reference table relying on the data types SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- now, drop the extension, recreate it with an older version and update it to latest version @@ -94,8 +94,8 @@ CREATE EXTENSION isn WITH VERSION "1.1"; RESET client_min_messages; -- before updating the version, ensure the current version SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1.1) (localhost,57638,t,1.1) (2 rows) @@ -104,16 +104,16 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam ALTER EXTENSION isn UPDATE TO '1.2'; -- show that ALTER EXTENSION is propagated SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1.2) (localhost,57638,t,1.2) (2 rows) -- before changing the schema, ensure the current schmea SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); - run_command_on_workers ------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,extension'test) (localhost,57638,t,extension'test) (2 rows) @@ -124,15 +124,15 @@ ALTER EXTENSION isn SET SCHEMA public; SET search_path TO public; -- make sure that the extension is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- show that the ALTER EXTENSION command is propagated SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); - run_command_on_workers ----------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,public) (localhost,57638,t,public) (2 rows) @@ -154,8 +154,8 @@ DROP EXTENSION seg CASCADE; DROP SCHEMA "extension'test" CASCADE; RESET client_min_messages; SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- 1 (1 row) @@ -163,58 +163,58 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port); CREATE EXTENSION seg; -- show that the extension is created on existing worker SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (1 row) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1.3) (1 row) -- now create the reference table CREATE TABLE ref_table_2 (x seg); SELECT create_reference_table('ref_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table_2" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- 1 (1 row) -- show that the extension is created on both existing and new node SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) (2 rows) -- and similarly check for the reference table select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -226,15 +226,15 @@ ROLLBACK; -- at the end of the transaction block, we did not create isn extension in coordinator or worker nodes as we rollback'ed -- make sure that the extension is not distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- and the extension does not exist on workers SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -261,8 +261,8 @@ set citus.enable_ddl_propagation to 'on'; SET client_min_messages TO WARNING; DROP EXTENSION pg_buffercache, isn CASCADE; SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn'); - count -------- + count 
+--------------------------------------------------------------------- 0 (1 row) @@ -273,14 +273,14 @@ SET client_min_messages TO WARNING; -- drop extension should just work DROP EXTENSION seg CASCADE; SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -289,8 +289,8 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname RESET client_min_messages; -- make sure that the extension is not avaliable anymore as a distributed object SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -301,25 +301,25 @@ SET search_path TO "extension'test"; BEGIN; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE EXTENSION seg; CREATE TABLE some_random_table_2 (a int, b seg); SELECT create_distributed_table('some_random_table_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ROLLBACK; -- show that the CREATE EXTENSION command propagated even if the transaction -- block is rollbacked, that's a shortcoming of dependency creation logic SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers -------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) (2 rows) @@ -334,8 +334,8 @@ SET search_path TO "extension'test"; RESET client_min_messages; -- remove the node, we'll add back again SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? ----------- + ?column? +--------------------------------------------------------------------- 1 (1 row) @@ -349,38 +349,38 @@ BEGIN; CREATE TYPE test_type_2 AS (a int, b test_type); CREATE TABLE t2 (a int, b test_type_2, c issn); SELECT create_distributed_table('t2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TYPE test_type_3 AS (a int, b test_type, c issn); CREATE TABLE t3 (a int, b test_type_3); SELECT create_reference_table('t3'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) COMMIT; -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "t3" to the node localhost:57638 - ?column? ----------- +NOTICE: Replicating reference table "t3" to the node localhost:xxxxx + ?column? 
+--------------------------------------------------------------------- 1 (1 row) -- make sure that both extensions are created on both nodes SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) diff --git a/src/test/regress/expected/propagate_set_commands.out b/src/test/regress/expected/propagate_set_commands.out index 8e34bcd96..2a906162e 100644 --- a/src/test/regress/expected/propagate_set_commands.out +++ b/src/test/regress/expected/propagate_set_commands.out @@ -2,9 +2,9 @@ CREATE SCHEMA propagate_set_commands; SET search_path TO propagate_set_commands; CREATE TABLE test (id int, value int); SELECT create_distributed_table('test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test VALUES (1,1), (3,3); @@ -15,8 +15,8 @@ SET citus.task_executor_type TO 'adaptive'; SET citus.select_opens_transaction_block TO on; BEGIN; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) @@ -28,8 +28,8 @@ BEGIN; -- set session commands are not propagated SET enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) @@ -38,8 +38,8 @@ BEGIN; -- should not propagate exit_on_error SET LOCAL exit_on_error TO on; SELECT current_setting('exit_on_error') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) @@ -48,15 +48,15 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) -- expand to new node, set should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) @@ -65,23 +65,23 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) -- should be back on after set to default SET LOCAL enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) -- expand to new node, set to default should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting ------------------ + current_setting 
+--------------------------------------------------------------------- on (1 row) @@ -90,16 +90,16 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) -- does not have the LOCAL keyword, not propagated SET enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) @@ -108,23 +108,23 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) -- should be back on after reset RESET enable_hashagg; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) -- expand to new node, reset should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) @@ -133,8 +133,8 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- off (1 row) @@ -142,16 +142,16 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; RESET ALL; SET search_path = 'propagate_set_commands'; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) -- funky case, we reset citus.propagate_set_commands, so not set again SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting ------------------ + current_setting +--------------------------------------------------------------------- on (1 row) diff --git a/src/test/regress/expected/recursive_dml_queries_mx.out b/src/test/regress/expected/recursive_dml_queries_mx.out index 4e2aed32b..8ab760472 100644 --- a/src/test/regress/expected/recursive_dml_queries_mx.out +++ b/src/test/regress/expected/recursive_dml_queries_mx.out @@ -4,23 +4,23 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE recursive_dml_queries_mx.distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_dml_queries_mx.second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_dml_queries_mx.reference_table (id text, name text); SELECT 
create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table SELECT i::text, i % 10, row_to_json(row(i, i*i)) FROM generate_series (0, 100) i; @@ -28,75 +28,75 @@ INSERT INTO second_distributed_table SELECT i::text, i % 10, row_to_json(row(i, INSERT INTO reference_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; SET client_min_messages TO DEBUG1; -- the subquery foo is recursively planned -UPDATE - reference_table -SET - name = 'new_' || name -FROM +UPDATE + reference_table +SET + name = 'new_' || name +FROM ( - SELECT + SELECT avg(second_distributed_table.tenant_id::int) as avg_tenant_id - FROM + FROM second_distributed_table ) as foo WHERE foo.avg_tenant_id::int::text = reference_table.id; -DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries_mx.second_distributed_table -DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) +DEBUG: generating subplan XXX_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries_mx.second_distributed_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) -- the subquery foo is recursively planned -- but note that the subquery foo itself is pushdownable -UPDATE - second_distributed_table -SET +UPDATE + second_distributed_table +SET dept = foo.max_dept * 2 -FROM +FROM ( - SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) as max_dept FROM + SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) as max_dept FROM ( - SELECT + SELECT second_distributed_table.dept, second_distributed_table.tenant_id - FROM + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id ) foo_inner GROUP BY - tenant_id + tenant_id ORDER BY 1 DESC ) as foo WHERE - foo.tenant_id != second_distributed_table.tenant_id + foo.tenant_id != second_distributed_table.tenant_id AND second_distributed_table.dept IN (2); -DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries_mx.second_distributed_table, recursive_dml_queries_mx.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC -DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM 
read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries_mx.second_distributed_table, recursive_dml_queries_mx.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) -- run some queries from worker nodes \c - - - :worker_1_port SET search_path TO recursive_dml_queries_mx, public; -- the subquery foo is recursively planned -- and foo itself is a non colocated subquery and recursively planned -UPDATE - second_distributed_table -SET +UPDATE + second_distributed_table +SET dept = foo.tenant_id::int / 4 -FROM +FROM ( - SELECT DISTINCT foo_inner_1.tenant_id FROM + SELECT DISTINCT foo_inner_1.tenant_id FROM ( - SELECT + SELECT second_distributed_table.dept, second_distributed_table.tenant_id - FROM + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id AND second_distributed_table.dept IN (3,4) ) foo_inner_1, ( - SELECT - second_distributed_table.tenant_id - FROM + SELECT + second_distributed_table.tenant_id + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id AND second_distributed_table.dept IN (4,5) @@ -104,60 +104,60 @@ FROM WHERE foo_inner_1.tenant_id != foo_inner_2.tenant_id ) as foo WHERE - foo.tenant_id != second_distributed_table.tenant_id + foo.tenant_id != second_distributed_table.tenant_id AND second_distributed_table.dept IN (3); -- use the second worker \c - - - :worker_2_port SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; -CREATE VIEW tenant_ids AS - SELECT - tenant_id, name - FROM +CREATE VIEW tenant_ids AS + SELECT + tenant_id, name + FROM distributed_table, reference_table - WHERE + WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; -- we currently do not allow local tables in modification queries -UPDATE - distributed_table -SET +UPDATE + distributed_table +SET dept = avg_tenant_id::int -FROM +FROM ( - SELECT + SELECT avg(local_table.id::int) as avg_tenant_id - FROM + FROM local_table ) as foo WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; - tenant_id | dept | info ------------+------+------------------------ + tenant_id | dept | info +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) -- 
we currently do not allow views in modification queries -UPDATE - distributed_table -SET +UPDATE + distributed_table +SET dept = avg_tenant_id::int -FROM +FROM ( - SELECT + SELECT avg(tenant_id::int) as avg_tenant_id - FROM + FROM tenant_ids ) as foo WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; - tenant_id | dept | info ------------+------+------------------------ + tenant_id | dept | info +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) diff --git a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out index f1a801af4..4532b3bb2 100644 --- a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out +++ b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out @@ -2,23 +2,23 @@ CREATE SCHEMA recursive_dml_with_different_planner_executors; SET search_path TO recursive_dml_with_different_planner_executors, public; CREATE TABLE distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE reference_table (id text, name text); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table SELECT i::text, i % 10, row_to_json(row(i, i*i)) FROM generate_series (0, 100) i; @@ -26,51 +26,51 @@ INSERT INTO second_distributed_table SELECT i::text, i % 10, row_to_json(row(i, SET client_min_messages TO DEBUG1; -- subquery with router planner -- joined with a real-time query -UPDATE - distributed_table -SET dept = foo.dept FROM - (SELECT tenant_id, dept FROM second_distributed_table WHERE dept = 1 ) as foo, +UPDATE + distributed_table +SET dept = foo.dept FROM + (SELECT tenant_id, dept FROM second_distributed_table WHERE dept = 1 ) as foo, (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4) OFFSET 0) as bar WHERE foo.tenant_id = bar.tenant_id - AND distributed_table.tenant_id = bar.tenant_id; -DEBUG: generating subplan 3_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) OFFSET 0 -DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.dept FROM (SELECT second_distributed_table.tenant_id, second_distributed_table.dept FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) bar WHERE ((foo.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id) AND (distributed_table.tenant_id 
OPERATOR(pg_catalog.=) bar.tenant_id)) + AND distributed_table.tenant_id = bar.tenant_id; +DEBUG: generating subplan XXX_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.dept FROM (SELECT second_distributed_table.tenant_id, second_distributed_table.dept FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) bar WHERE ((foo.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id)) -- a non colocated subquery inside the UPDATE -UPDATE distributed_table SET dept = foo.max_dept FROM +UPDATE distributed_table SET dept = foo.max_dept FROM ( - SELECT + SELECT max(dept) as max_dept - FROM + FROM (SELECT DISTINCT tenant_id, dept FROM distributed_table) as distributed_table - WHERE tenant_id NOT IN + WHERE tenant_id NOT IN (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4)) ) as foo WHERE foo.max_dept > dept * 3; -DEBUG: generating subplan 5_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) -DEBUG: generating subplan 5_2 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (NOT (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.tenant_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)))) -DEBUG: Plan 5 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE (foo.max_dept OPERATOR(pg_catalog.>) (distributed_table.dept OPERATOR(pg_catalog.*) 3)) +DEBUG: generating subplan XXX_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) +DEBUG: generating subplan XXX_2 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (NOT (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE (foo.max_dept OPERATOR(pg_catalog.>) (distributed_table.dept OPERATOR(pg_catalog.*) 3)) -- 
subquery with repartition query SET citus.enable_repartition_joins to ON; -UPDATE distributed_table SET dept = foo.some_tenants::int FROM +UPDATE distributed_table SET dept = foo.some_tenants::int FROM ( SELECT DISTINCT second_distributed_table.tenant_id as some_tenants FROM second_distributed_table, distributed_table WHERE second_distributed_table.dept = distributed_table.dept ) as foo; -DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT second_distributed_table.tenant_id AS some_tenants FROM recursive_dml_with_different_planner_executors.second_distributed_table, recursive_dml_with_different_planner_executors.distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) distributed_table.dept) -DEBUG: Plan 8 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = (foo.some_tenants)::integer FROM (SELECT intermediate_result.some_tenants FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(some_tenants text)) foo +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT second_distributed_table.tenant_id AS some_tenants FROM recursive_dml_with_different_planner_executors.second_distributed_table, recursive_dml_with_different_planner_executors.distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) distributed_table.dept) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = (foo.some_tenants)::integer FROM (SELECT intermediate_result.some_tenants FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(some_tenants text)) foo SET citus.enable_repartition_joins to OFF; --- final query is router -UPDATE distributed_table SET dept = foo.max_dept FROM +-- final query is router +UPDATE distributed_table SET dept = foo.max_dept FROM ( - SELECT + SELECT max(dept) as max_dept - FROM + FROM (SELECT DISTINCT tenant_id, dept FROM distributed_table) as distributed_table - WHERE tenant_id IN + WHERE tenant_id IN (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4)) ) as foo WHERE foo.max_dept >= dept and tenant_id = '8'; -DEBUG: generating subplan 10_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) -DEBUG: Plan 10 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text)) +DEBUG: generating subplan XXX_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT 
second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text)) RESET client_min_messages; DROP SCHEMA recursive_dml_with_different_planner_executors CASCADE; NOTICE: drop cascades to 3 other objects diff --git a/src/test/regress/expected/relation_access_tracking.out b/src/test/regress/expected/relation_access_tracking.out index 2a67c3005..de8b50c8e 100644 --- a/src/test/regress/expected/relation_access_tracking.out +++ b/src/test/regress/expected/relation_access_tracking.out @@ -1,6 +1,6 @@ ---- +--------------------------------------------------------------------- --- tests around access tracking within transaction blocks ---- +--------------------------------------------------------------------- CREATE SCHEMA access_tracking; SET search_path TO 'access_tracking'; CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid) @@ -54,44 +54,44 @@ CREATE VIEW relation_acesses AS SET citus.shard_replication_factor TO 1; CREATE TABLE table_1 (key int, value int); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_2 (key int, value int); SELECT create_distributed_table('table_2', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_3 (key int, value int); SELECT create_distributed_table('table_3', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_4 (key int, value int); SELECT create_distributed_table('table_4', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_5 (key int, value int); SELECT create_distributed_table('table_5', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE table_6 (key int, value int); SELECT create_reference_Table('table_6'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i; @@ -104,28 +104,28 @@ INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i; BEGIN; CREATE TABLE table_7 (key int, value int); SELECT create_distributed_table('table_7', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_7') ORDER BY 
1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_7 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) COMMIT; -- outisde the transaction blocks, the function always returns zero SELECT count(*) FROM table_1; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -133,54 +133,54 @@ SELECT * FROM relation_acesses WHERE table_name = 'table_1'; -- and parallel SELECTs,DMLs, and DDLs BEGIN; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1), (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) ALTER TABLE table_1 
ADD COLUMN test_col INT; -- now see that the other tables are not accessed at all SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | parallel_access (1 row) @@ -189,40 +189,40 @@ ROLLBACK; -- commands executed, we can treat the transaction as sequential BEGIN; SELECT count(*) FROM table_1 WHERE key = 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 2; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -231,8 +231,8 @@ ROLLBACK; BEGIN; ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -247,14 +247,14 @@ BEGIN; table_1.key = table_2.key AND table_2.key = table_3.key AND table_3.key = table_4.key AND table_4.key = table_5.key AND table_1.key = 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access 
-------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -273,14 +273,14 @@ BEGIN; table_1, table_2 WHERE table_1.key = table_2.key; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -296,14 +296,14 @@ BEGIN; table_1, table_2 WHERE table_1.key = table_2.key; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -323,14 +323,14 @@ BEGIN; table_1.key = table_2.key AND table_2.key = table_3.key AND table_3.key = table_4.key AND table_4.key = table_5.key ) as foo; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -347,16 +347,16 @@ ROLLBACK; BEGIN; UPDATE table_1 SET value = 15; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) SET LOCAL citus.multi_shard_modify_mode = 'sequential'; UPDATE table_2 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access 
+--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -368,8 +368,8 @@ BEGIN; table_1 SET value = 15 WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -380,8 +380,8 @@ ROLLBACK; BEGIN; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed (2 rows) @@ -392,8 +392,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -403,8 +403,8 @@ ROLLBACK; BEGIN; INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed (2 rows) @@ -424,14 +424,14 @@ BEGIN; table_1.key = table_2.key OFFSET 0 ) as foo; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -453,8 +453,8 @@ BEGIN; OFFSET 0 ) as foo; SELECT * FROM relation_acesses WHERE 
table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | parallel_access | not_parallel_accessed @@ -479,8 +479,8 @@ BEGIN; OFFSET 0 ) as foo; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -505,8 +505,8 @@ BEGIN; ) as foo ) AND value IN (SELECT key FROM table_4); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | parallel_access | not_parallel_accessed @@ -521,8 +521,8 @@ BEGIN; 2 2 3 3 SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -531,8 +531,8 @@ ROLLBACK; BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | parallel_access | not_parallel_accessed (1 row) @@ -541,8 +541,8 @@ ROLLBACK; BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -550,28 +550,28 @@ ROLLBACK; -- reference table accesses should always be a sequential BEGIN; SELECT count(*) FROM table_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM 
relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access -------------+------------------------+--------------+-------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_6 | reference_table_access | not_accessed | not_accessed (1 row) UPDATE table_6 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access -------------+------------------------+------------------------+-------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | not_accessed (1 row) ALTER TABLE table_6 ADD COLUMN x INT; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access -------------+------------------------+------------------------+------------------------ + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | reference_table_access (1 row) @@ -579,14 +579,14 @@ ROLLBACK; -- reference table join with a distributed table BEGIN; SELECT count(*) FROM table_1 JOIN table_6 USING(key); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_6 | parallel_access | not_accessed | not_accessed (2 rows) @@ -596,8 +596,8 @@ ROLLBACK; BEGIN; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -607,8 +607,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -617,8 +617,8 @@ ROLLBACK; BEGIN; TRUNCATE table_6; SELECT * FROM relation_acesses WHERE table_name IN ('table_6') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+------------------------ + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_6 | not_accessed | not_accessed | reference_table_access (1 row) @@ -628,8 +628,8 @@ ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); BEGIN; 
ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -641,8 +641,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -650,17 +650,17 @@ BEGIN; ROLLBACK; CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_test', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Adding partition tables via CREATE TABLE should have DDL access the partitioned table as well BEGIN; CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -671,8 +671,8 @@ CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test; BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -681,16 +681,16 @@ COMMIT; -- Adding partition tables via ATTACH PARTITION on distributed tables should have DDL access the partitioned table as well CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; SELECT create_distributed_table('partitioning_test_2010', 'id'); - create_distributed_table --------------------------- - + 
create_distributed_table +--------------------------------------------------------------------- + (1 row) BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -699,14 +699,14 @@ COMMIT; -- reading from partitioned table marks all of its partitions BEGIN; SELECT count(*) FROM partitioning_test; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -717,14 +717,14 @@ COMMIT; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -735,8 +735,8 @@ COMMIT; BEGIN; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------+-----------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2010 | parallel_access | parallel_access | not_parallel_accessed @@ -748,8 +748,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name 
IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -760,8 +760,8 @@ COMMIT; BEGIN; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -773,8 +773,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -784,14 +784,14 @@ ROLLBACK; -- reading from partition table marks its parent BEGIN; SELECT count(*) FROM partitioning_test_2009; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -802,14 +802,14 @@ COMMIT; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test_2009; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | 
select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -820,8 +820,8 @@ COMMIT; BEGIN; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -833,8 +833,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -845,8 +845,8 @@ COMMIT; BEGIN; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -858,8 +858,8 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed 
partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -872,8 +872,8 @@ BEGIN; TRUNCATE table_1 CASCADE; NOTICE: truncate cascades to table "table_2" SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -883,14 +883,14 @@ ROLLBACK; BEGIN; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -900,14 +900,14 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -916,16 +916,16 @@ COMMIT; BEGIN; WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) SELECT * FROM cte_1 ORDER BY 1; - key | value -------+------- + key | value +--------------------------------------------------------------------- 1000 | 1000 1001 | 1001 1002 | 1002 (3 rows) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -934,14 +934,14 @@ ROLLBACK; BEGIN; WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -950,14 +950,14 @@ ROLLBACK; BEGIN; WITH 
cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+----------------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -970,14 +970,14 @@ INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; BEGIN; SELECT create_distributed_table('table_3', 'key'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------+----------------- + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- table_3 | not_parallel_accessed | parallel_access | parallel_access (1 row) diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out index f06ad8c6b..e59a1f89e 100644 --- a/src/test/regress/expected/remove_coordinator.out +++ b/src/test/regress/expected/remove_coordinator.out @@ -1,7 +1,7 @@ -- removing coordinator from pg_dist_node should update pg_dist_colocation SELECT master_remove_node('localhost', :master_port); - master_remove_node --------------------- - + master_remove_node +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 83f0c0cf7..b68926154 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -12,26 +12,26 @@ SET client_min_messages TO LOG; SET citus.log_local_commands TO ON; CREATE TABLE squares(a int, b int); SELECT create_reference_table('squares'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO squares SELECT i, i * i FROM generate_series(1, 10) i; -- should be executed locally SELECT count(*) FROM squares; LOG: executing the command locally: SELECT count(*) AS count FROM replicate_ref_to_coordinator.squares_8000000 squares - count -------- + count +--------------------------------------------------------------------- 10 (1 row) -- create a second reference table CREATE TABLE numbers(a int); SELECT create_reference_table('numbers'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO numbers VALUES (20), (21); @@ -39,21 +39,21 @@ LOG: executing the command locally: INSERT INTO replicate_ref_to_coordinator.nu -- INSERT ... 
SELECT between reference tables BEGIN; EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers; - QUERY PLAN ------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57636 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Insert on squares_8000000 citus_table_alias (cost=0.00..41.88 rows=2550 width=8) -> Seq Scan on numbers_8000001 numbers (cost=0.00..41.88 rows=2550 width=8) (7 rows) INSERT INTO squares SELECT a, a*a FROM numbers; SELECT * FROM squares WHERE a >= 20 ORDER BY a; - a | b -----+----- + a | b +--------------------------------------------------------------------- 20 | 400 21 | 441 (2 rows) @@ -61,13 +61,13 @@ SELECT * FROM squares WHERE a >= 20 ORDER BY a; ROLLBACK; BEGIN; EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57636 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Insert on numbers_8000001 citus_table_alias (cost=0.00..38.25 rows=753 width=4) -> Seq Scan on squares_8000000 squares (cost=0.00..38.25 rows=753 width=4) Filter: (a < 3) @@ -75,8 +75,8 @@ EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; INSERT INTO numbers SELECT a FROM squares WHERE a < 3; SELECT * FROM numbers ORDER BY a; - a ----- + a +--------------------------------------------------------------------- 1 2 20 @@ -86,8 +86,8 @@ SELECT * FROM numbers ORDER BY a; ROLLBACK; -- Make sure we hide shard tables ... 
SELECT citus_table_is_visible('numbers_8000001'::regclass::oid); - citus_table_is_visible ------------------------- + citus_table_is_visible +--------------------------------------------------------------------- f (1 row) @@ -95,8 +95,8 @@ SELECT citus_table_is_visible('numbers_8000001'::regclass::oid); CREATE TABLE local_table(a int); INSERT INTO local_table VALUES (2), (4), (7), (20); EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Merge Join (cost=359.57..860.00 rows=32512 width=8) Merge Cond: (local_table.a = numbers_8000001.a) -> Sort (cost=179.78..186.16 rows=2550 width=4) @@ -108,8 +108,8 @@ EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; (8 rows) SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1; - a | a -----+---- + a | a +--------------------------------------------------------------------- 20 | 20 (1 row) @@ -118,8 +118,8 @@ SELECT lt.a, sq.a, sq.b FROM local_table lt JOIN squares sq ON sq.a > lt.a and sq.b > 90 ORDER BY 1,2,3; - a | a | b ----+----+----- + a | a | b +--------------------------------------------------------------------- 2 | 10 | 100 4 | 10 | 100 7 | 10 | 100 @@ -158,15 +158,15 @@ ERROR: cannot join local tables and reference tables in a transaction block, ud CONTEXT: SQL statement "SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1" PL/pgSQL function test_reference_local_join_plpgsql_func() line 5 at PERFORM SELECT sum(a) FROM local_table; - sum ------ + sum +--------------------------------------------------------------------- 33 (1 row) SELECT sum(a) FROM numbers; LOG: executing the command locally: SELECT sum(a) AS sum FROM replicate_ref_to_coordinator.numbers_8000001 numbers - sum ------ + sum +--------------------------------------------------------------------- 41 (1 row) @@ -181,9 +181,9 @@ CONTEXT: SQL function "test_reference_local_join_proc" statement 1 CREATE SCHEMA s1; CREATE TABLE s1.ref(a int); SELECT create_reference_table('s1.ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) BEGIN; @@ -238,16 +238,16 @@ ERROR: relation local_table is not distributed WITH t AS (SELECT *, random() x FROM numbers) SELECT * FROM numbers, local_table WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a); - a | a ----+--- + a | a +--------------------------------------------------------------------- (0 rows) -- shouldn't plan locally even if distributed table is in CTE or subquery CREATE TABLE dist(a int); SELECT create_distributed_table('dist', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO dist VALUES (20),(30); @@ -274,8 +274,8 @@ SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); - coordinator_plan ------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -285,10 +285,10 @@ SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; $Q$); - coordinator_plan 
------------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) - -> Distributed Subplan 24_1 + -> Distributed Subplan XXX_1 -> Seq Scan on local_table Filter: ((a >= 1) AND (a <= 10)) Task Count: 1 @@ -306,8 +306,8 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); - plan_is_distributed ---------------------- + plan_is_distributed +--------------------------------------------------------------------- f (1 row) @@ -328,8 +328,8 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT abs(a.a) FROM local_table a, numbers b WHERE a.a = b.a; $Q$); - plan_is_distributed ---------------------- + plan_is_distributed +--------------------------------------------------------------------- f (1 row) @@ -337,8 +337,8 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT a.a FROM local_table a, numbers b WHERE a.a = b.a ORDER BY abs(a.a); $Q$); - plan_is_distributed ---------------------- + plan_is_distributed +--------------------------------------------------------------------- f (1 row) @@ -351,7 +351,5 @@ DROP SCHEMA replicate_ref_to_coordinator CASCADE; -- Make sure the shard was dropped SELECT 'numbers_8000001'::regclass::oid; ERROR: relation "numbers_8000001" does not exist -LINE 1: SELECT 'numbers_8000001'::regclass::oid; - ^ SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/expected/replicated_partitioned_table.out b/src/test/regress/expected/replicated_partitioned_table.out index fb6c5eb16..5ca5dc80b 100644 --- a/src/test/regress/expected/replicated_partitioned_table.out +++ b/src/test/regress/expected/replicated_partitioned_table.out @@ -28,9 +28,9 @@ INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01', SELECT create_distributed_table('collections', 'key'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- now create partition of a already distributed table @@ -44,9 +44,9 @@ NOTICE: Copying data from local table... -- finally attach a distributed table to a distributed table CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0; SELECT create_distributed_table('collections_5', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- load some data @@ -64,8 +64,8 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | placement_count ----------------+----------------- + logicalrelid | placement_count +--------------------------------------------------------------------- collections | 8 collections_1 | 8 collections_2 | 8 @@ -81,8 +81,8 @@ FROM pg_dist_partition WHERE logicalrelid::text LIKE '%collections%'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -145,9 +145,9 @@ HINT: Run the query on the parent table "collections" instead. 
-- foreign key creation is disallowed due to replication factor > 1 CREATE TABLE fkey_test (key bigint PRIMARY KEY); SELECT create_distributed_table('fkey_test', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) ALTER TABLE @@ -168,14 +168,14 @@ ALTER TABLE collections DETACH PARTITION collections_6; ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 ); -- read queries works just fine SELECT count(*) FROM collections_1 WHERE key = 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM collections_1 WHERE key != 1; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -185,9 +185,9 @@ CREATE TABLE collections_agg ( sum_value numeric ); SELECT create_distributed_table('collections_agg', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- pushdown roll-up @@ -212,9 +212,9 @@ CREATE INDEX ON customer_engagements (id, event_id); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- ingest some data for the tests @@ -245,15 +245,15 @@ ROLLBACK; -- modifications after reparing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0; SELECT * FROM customer_engagements ORDER BY 1,2,3; - id | event_id | value -----+----------+------- + id | event_id | value +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 1 @@ -263,15 +263,15 @@ SELECT * FROM customer_engagements ORDER BY 1,2,3; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ - + master_copy_shard_placement +--------------------------------------------------------------------- + (1 row) INSERT INTO customer_engagements VALUES (1, 1); SELECT count(*) FROM customer_engagements; - count -------- + count +--------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/row_types.out b/src/test/regress/expected/row_types.out index 29d59d950..c82b33b48 100644 --- a/src/test/regress/expected/row_types.out +++ b/src/test/regress/expected/row_types.out @@ -3,9 +3,9 @@ CREATE SCHEMA row_types; SET search_path TO row_types; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION table_returner(INT) RETURNS TABLE(name text, id INT) @@ -15,9 +15,9 @@ BEGIN END; $$ language plpgsql; SELECT create_distributed_function('table_returner(int)'); 
- create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION record_returner(INOUT id int, OUT name text) @@ -29,9 +29,9 @@ BEGIN END; $$ language plpgsql; SELECT create_distributed_function('record_returner(int)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) CREATE OR REPLACE FUNCTION identity_returner(x anyelement) @@ -42,16 +42,16 @@ BEGIN END; $$ language plpgsql; SELECT create_distributed_function('identity_returner(anyelement)'); - create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) INSERT INTO test VALUES (1,2), (1,3), (2,2), (2,3); -- multi-shard queries support row types SELECT (x,y) FROM test ORDER BY x, y; - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -59,8 +59,8 @@ SELECT (x,y) FROM test ORDER BY x, y; (4 rows) SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -68,8 +68,8 @@ SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; (4 rows) SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; - array ---------------------------------------------- + array +--------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} {NULL,"(2,\"(2,2)\")",NULL,"(2,\"(2,2)\")"} @@ -77,8 +77,8 @@ SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; (4 rows) SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; - array ---------------------------------------- + array +--------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} {{"(2,\"(2,2)\")"},{"(2,\"(2,2)\")"}} @@ -86,8 +86,8 @@ SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; (4 rows) select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; - foo | x | y --------+---+--- + foo | x | y +--------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 (2,2) | 2 | 2 @@ -95,8 +95,8 @@ select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; (4 rows) SELECT table_returner(x) FROM test ORDER BY x, y; - table_returner ----------------- + table_returner +--------------------------------------------------------------------- (1,1) (1,1) (2,2) @@ -104,8 +104,8 @@ SELECT table_returner(x) FROM test ORDER BY x, y; (4 rows) SELECT record_returner(x) FROM test ORDER BY x, y; - record_returner ------------------ + record_returner +--------------------------------------------------------------------- (2,returned) (2,returned) (3,returned) @@ -113,17 +113,17 @@ SELECT record_returner(x) FROM test ORDER BY x, y; (4 rows) SELECT NULLIF((x, y), (y, x)) FROM test ORDER BY x, y; - nullif --------- + nullif +--------------------------------------------------------------------- (1,2) (1,3) - + (2,3) (4 rows) SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, y; - least -------- + least +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -131,8 +131,8 @@ SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, 
y; (4 rows) SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; - greatest ----------- + greatest +--------------------------------------------------------------------- (2,1) (3,1) (2,2) @@ -140,8 +140,8 @@ SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; (4 rows) SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; - coalesce ----------- + coalesce +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -149,8 +149,8 @@ SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; (4 rows) SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; - row -------- + row +--------------------------------------------------------------------- (2,1) (3,1) (2,2) @@ -158,10 +158,10 @@ SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; (4 rows) SELECT CASE x WHEN 2 THEN (x, y) END FROM test ORDER BY x, y; - case -------- - - + case +--------------------------------------------------------------------- + + (2,2) (2,3) (4 rows) @@ -180,94 +180,94 @@ SELECT array_agg((x, y)) FROM test; ERROR: input of anonymous composite types is not implemented -- router queries support row types SELECT (x,y) FROM test WHERE x = 1 ORDER BY x, y; - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT (x,y) AS foo FROM test WHERE x = 1 ORDER BY x, y; - foo -------- + foo +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test WHERE x = 1 ORDER BY x, y; - array ---------------------------------------------- + array +--------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} (2 rows) SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test WHERE x = 1 ORDER BY x, y; - array ---------------------------------------- + array +--------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} (2 rows) select distinct (x,y) AS foo, x, y FROM test WHERE x = 1 ORDER BY x, y; - foo | x | y --------+---+--- + foo | x | y +--------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 (2 rows) SELECT table_returner(x) FROM test WHERE x = 1 ORDER BY x, y; - table_returner ----------------- + table_returner +--------------------------------------------------------------------- (1,1) (1,1) (2 rows) SELECT record_returner(x) FROM test WHERE x = 1 ORDER BY x, y; - record_returner ------------------ + record_returner +--------------------------------------------------------------------- (2,returned) (2,returned) (2 rows) SELECT NULLIF((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - nullif --------- + nullif +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT LEAST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - least -------- + least +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT GREATEST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - greatest ----------- + greatest +--------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT COALESCE(NULL, (x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - coalesce ----------- + coalesce +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT CASE x 
WHEN 2 THEN (x, y) ELSE (y, x) END FROM test WHERE x = 1 ORDER BY x, y; - row -------- + row +--------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT CASE x WHEN 2 THEN (x, y) END FROM test WHERE x = 1 ORDER BY x, y; - case ------- - - + case +--------------------------------------------------------------------- + + (2 rows) -- varying shape unsupported @@ -284,15 +284,15 @@ SELECT array_agg((x, y)) FROM test WHERE x = 1; ERROR: input of anonymous composite types is not implemented -- nested row expressions SELECT (x,(x,y)) AS foo FROM test WHERE x = 1 ORDER BY x, y; - foo -------------- + foo +--------------------------------------------------------------------- (1,"(1,2)") (1,"(1,3)") (2 rows) SELECT (x,record_returner(x)) FROM test WHERE x = 1 ORDER BY x, y; - row --------------------- + row +--------------------------------------------------------------------- (1,"(2,returned)") (1,"(2,returned)") (2 rows) @@ -303,43 +303,43 @@ ERROR: input of anonymous composite types is not implemented -- try prepared statements PREPARE rec(int) AS SELECT (x,y*$1) FROM test WHERE x = $1 ORDER BY x, y; EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row -------- + row +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) diff --git a/src/test/regress/expected/sequential_modifications.out b/src/test/regress/expected/sequential_modifications.out index 53e057836..a2a4d8b43 100644 --- a/src/test/regress/expected/sequential_modifications.out +++ b/src/test/regress/expected/sequential_modifications.out @@ -64,38 +64,38 @@ CREATE OR REPLACE FUNCTION set_local_multi_shard_modify_mode_to_sequential() -- disbable 2PC recovery since our tests will check that ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) CREATE TABLE test_table(a int, b int); SELECT create_distributed_table('test_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- not useful if not in transaction SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- - + set_local_multi_shard_modify_mode_to_sequential +--------------------------------------------------------------------- + (1 row) -- we should see #worker transactions -- when sequential mode is used SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT a_check CHECK(a > 0); 
SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) @@ -103,15 +103,15 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); -- when parallel mode is used SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT b_check CHECK(b > 0); SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) @@ -119,68 +119,68 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT c_check CHECK(a > 0); SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT d_check CHECK(a > 0); SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) CREATE TABLE ref_test(a int); SELECT create_reference_table('ref_test'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SET citus.multi_shard_commit_protocol TO '1pc'; -- reference tables should always use 2PC SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) -- reference tables should always use 2PC SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index_2 ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count 
--------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) @@ -189,38 +189,38 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_rep_2 (a int); SELECT create_distributed_table('test_table_rep_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- 1PC should never use 2PC with rep > 1 SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_1 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_2 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) @@ -228,29 +228,29 @@ SELECT no_distributed_2PCs(); SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_3 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_4 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) @@ -259,32 +259,32 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX CONCURRENTLY test_table_rep_2_i_5 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + 
no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX CONCURRENTLY test_table_rep_2_i_6 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); - no_distributed_2pcs ---------------------- + no_distributed_2pcs +--------------------------------------------------------------------- t (1 row) @@ -293,38 +293,38 @@ CREATE TABLE test_seq_truncate (a int); INSERT INTO test_seq_truncate SELECT i FROM generate_series(0, 100) i; SELECT create_distributed_table('test_seq_truncate', 'a'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- with parallel modification mode, we should see #shards records SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) @@ -332,37 +332,37 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); CREATE TABLE test_seq_truncate_rep_2 (a int); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_seq_truncate_rep_2', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_seq_truncate_rep_2 SELECT i FROM generate_series(0, 100) i; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - 
recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) @@ -371,38 +371,38 @@ CREATE TABLE multi_shard_modify_test ( t_name varchar(25) not null, t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- with parallel modification mode, we should see #shards records SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) @@ -412,17 +412,17 @@ BEGIN; INSERT INTO multi_shard_modify_test VALUES (1,'1',1), (2,'2',2), (3,'3',3), (4,'4',4); -- now switch to sequential mode to enable a successful TRUNCATE SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- - + set_local_multi_shard_modify_mode_to_sequential +--------------------------------------------------------------------- + (1 row) TRUNCATE multi_shard_modify_test; COMMIT; -- see that all the data successfully removed SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -430,29 +430,29 @@ SELECT count(*) FROM multi_shard_modify_test; -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count 
+--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ + distributed_2pcs_are_equal_to_placement_count +--------------------------------------------------------------------- t (1 row) @@ -462,32 +462,32 @@ BEGIN; INSERT INTO multi_shard_modify_test VALUES (1,'1',1), (2,'2',2), (3,'3',3), (4,'4',4); -- now switch to sequential mode to enable a successful INSERT .. SELECT SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- - + set_local_multi_shard_modify_mode_to_sequential +--------------------------------------------------------------------- + (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; COMMIT; -- see that all the data successfully inserted SELECT count(*) FROM multi_shard_modify_test; - count -------- + count +--------------------------------------------------------------------- 210 (1 row) ALTER SYSTEM SET citus.recover_2pc_interval TO DEFAULT; SET citus.shard_replication_factor TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf ----------------- + pg_reload_conf +--------------------------------------------------------------------- t (1 row) -- The following tests are added to test if create_distributed_table honors sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -496,25 +496,25 @@ CREATE TABLE test_seq_multi_shard_update(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_multi_shard_update', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_seq_multi_shard_update VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); DELETE FROM test_seq_multi_shard_update WHERE b < 2; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) DROP TABLE test_seq_multi_shard_update; -- Check if truncate works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -522,25 +522,25 @@ CREATE TABLE test_seq_truncate_after_create(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_truncate_after_create', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + 
(1 row) INSERT INTO test_seq_truncate_after_create VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); TRUNCATE test_seq_truncate_after_create; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) DROP TABLE test_seq_truncate_after_create; -- Check if drop table works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -548,23 +548,23 @@ CREATE TABLE test_seq_drop_table(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_drop_table', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) DROP TABLE test_seq_drop_table; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) -- Check if copy errors out properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -572,24 +572,24 @@ CREATE TABLE test_seq_copy(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_copy', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) \COPY test_seq_copy FROM STDIN DELIMITER AS ','; ROLLBACK; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- f (1 row) DROP TABLE test_seq_copy; -- Check if DDL + CREATE INDEX works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions -------------------------------- + recover_prepared_transactions +--------------------------------------------------------------------- 0 (1 row) @@ -597,9 +597,9 @@ CREATE TABLE test_seq_ddl_index(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_ddl_index', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_seq_ddl_index VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); @@ -607,8 +607,8 @@ BEGIN; CREATE INDEX idx ON test_seq_ddl_index(c); COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count --------------------------------------------- + distributed_2pcs_are_equal_to_worker_count +--------------------------------------------------------------------- t (1 row) diff --git 
a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out index f5f5e0884..9adcdb5ba 100644 --- a/src/test/regress/expected/set_operation_and_local_tables.out +++ b/src/test/regress/expected/set_operation_and_local_tables.out @@ -2,16 +2,16 @@ CREATE SCHEMA recursive_set_local; SET search_path TO recursive_set_local, public; CREATE TABLE recursive_set_local.test (x int, y int); SELECT create_distributed_table('test', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_set_local.ref (a int, b int); SELECT create_reference_table('ref'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_set_local.local_test (x int, y int); @@ -22,42 +22,42 @@ SET client_min_messages TO DEBUG; -- we should be able to run set operations with local tables (SELECT x FROM test) INTERSECT (SELECT x FROM local_test) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. -DEBUG: generating subplan 3_1 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 3_2 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- (0 rows) -- we should be able to run set operations with generate series (SELECT x FROM test) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 5_1 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x 
+--------------------------------------------------------------------- 2 1 (2 rows) --- we'd first recursively plan the query with "test", thus don't need to recursively +-- we'd first recursively plan the query with "test", thus don't need to recursively -- plan other query (SELECT x FROM test LIMIT 5) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 -DEBUG: generating subplan 7_1 for subquery SELECT x FROM recursive_set_local.test LIMIT 5 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.test LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 2 1 (2 rows) @@ -66,8 +66,8 @@ DEBUG: Plan is router executable (SELECT a FROM ref) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Creating router plan DEBUG: Plan is router executable - a ---- + a +--------------------------------------------------------------------- 3 2 (2 rows) @@ -76,23 +76,23 @@ DEBUG: Plan is router executable (SELECT x FROM test) INTERSECT (SELECT i/0 FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 10_1 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT (i.i OPERATOR(pg_catalog./) 0) FROM generate_series(0, 100) i(i) ORDER BY 1 DESC +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT (i.i OPERATOR(pg_catalog./) 0) FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable ERROR: division by zero -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- we should be able to run set operations with generate series and local tables as well ((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 12_1 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 12_2 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 12 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('12_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 4 3 2 @@ -102,36 +102,36 @@ DEBUG: Plan is router executable -- two local tables are on different leaf queries, so safe to plan & execute ((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT x FROM local_test) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. -DEBUG: generating subplan 14_1 for subquery SELECT x FROM recursive_set_local.local_test -DEBUG: generating subplan 14_2 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 14_3 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 14 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 4 3 (2 rows) -- use ctes inside unions along with local tables on the top level -WITH +WITH cte_1 AS (SELECT user_id FROM 
users_table), cte_2 AS (SELECT user_id FROM events_table) ((SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) UNION (SELECT x FROM local_test)) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. -DEBUG: generating subplan 16_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 16_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 16_3 for subquery SELECT x FROM recursive_set_local.local_test -DEBUG: Plan 16 query after replacing subqueries and CTEs: (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 UNION SELECT intermediate_result.x FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 UNION SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -146,27 +146,27 @@ SELECT count(*) FROM ( - ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION + ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT a FROM ref) SELECT * FROM cte_1)) INTERSECT (SELECT x FROM local_test) ) as foo, test WHERE test.y = foo.x; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 19_1 for CTE cte_1: SELECT x FROM recursive_set_local.test +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT x FROM recursive_set_local.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 19_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 19_3 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 19_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) -DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.test WHERE (test.y OPERATOR(pg_catalog.=) foo.x) +DEBUG: generating subplan XXX_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.test WHERE (test.y OPERATOR(pg_catalog.=) foo.x) DEBUG: Router planner cannot handle multi-shard select queries - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -176,46 +176,46 @@ SELECT count(*) FROM ( - ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION + ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT a FROM ref) SELECT * FROM cte_1)) INTERSECT (SELECT x FROM local_test) ) as foo, ref WHERE ref.a = foo.x; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 23_1 for CTE cte_1: SELECT x FROM recursive_set_local.test +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT x FROM recursive_set_local.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 23_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 23_3 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 23_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('23_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('23_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.ref WHERE (ref.a OPERATOR(pg_catalog.=) foo.x) +DEBUG: generating subplan XXX_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.ref WHERE (ref.a OPERATOR(pg_catalog.=) foo.x) DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 1 (1 row) -- subquery union in WHERE clause without parition column equality is recursively planned including the local tables SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c UNION SELECT y FROM local_test d) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 27_1 for subquery SELECT y FROM recursive_set_local.local_test d +DEBUG: generating subplan XXX_1 for subquery SELECT y FROM recursive_set_local.local_test d DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 27_2 for subquery SELECT x FROM recursive_set_local.test b +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.test b DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 27_3 for subquery SELECT y FROM recursive_set_local.test c +DEBUG: generating subplan XXX_3 for subquery SELECT y FROM recursive_set_local.test c DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 27_4 for subquery SELECT intermediate_result.x FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('27_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) -DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('27_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y +DEBUG: generating subplan XXX_4 for subquery SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -223,20 +223,20 @@ DEBUG: Router planner cannot handle multi-shard select queries -- same query with subquery in where is wrapped in CTE SELECT * FROM test a WHERE x IN (WITH cte AS (SELECT x FROM test b UNION SELECT y FROM test c UNION SELECT y FROM local_test d) SELECT * FROM cte) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. -DEBUG: generating subplan 31_1 for CTE cte: SELECT b.x FROM recursive_set_local.test b UNION SELECT c.y FROM recursive_set_local.test c UNION SELECT d.y FROM recursive_set_local.local_test d +DEBUG: generating subplan XXX_1 for CTE cte: SELECT b.x FROM recursive_set_local.test b UNION SELECT c.y FROM recursive_set_local.test c UNION SELECT d.y FROM recursive_set_local.local_test d DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 32_1 for subquery SELECT y FROM recursive_set_local.local_test d +DEBUG: generating subplan XXX_1 for subquery SELECT y FROM recursive_set_local.local_test d DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 32_2 for subquery SELECT x FROM recursive_set_local.test b +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.test b DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 32_3 for subquery SELECT y FROM recursive_set_local.test c -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('32_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) +DEBUG: generating subplan XXX_3 for subquery SELECT y FROM recursive_set_local.test c +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT cte.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte)) ORDER BY x, y +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT cte.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte)) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -246,43 +246,43 @@ SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x LIMIT DEBUG: Local tables cannot be used in distributed queries. 
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 35_1 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 35_2 for subquery SELECT x, y FROM recursive_set_local.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_set_local.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 35_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT u.x, u.y, local_test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.local_test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, local_test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.local_test USING (x)) ORDER BY u.x, u.y DEBUG: Local tables cannot be used in distributed queries. ERROR: relation local_test is not distributed --- though we replace some queries including the local query, the intermediate result is on the outer part of an outer join +-- though we replace some queries including the local query, the intermediate result is on the outer part of an outer join SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u LEFT JOIN test USING (x) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 39_1 for subquery SELECT x, y FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 39_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 39_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join --- we replace some queries including the local query, the intermediate result is on the inner part of an outer join +-- we replace some queries including the local query, the intermediate result is on the inner part of an outer join SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u RIGHT JOIN test USING (x) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 42_1 for subquery SELECT x, y FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 42_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 42_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u RIGHT JOIN recursive_set_local.test USING (x)) ORDER BY test.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT test.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u RIGHT JOIN recursive_set_local.test USING (x)) ORDER BY test.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | | 1 2 | | 2 (2 rows) @@ -290,45 +290,45 @@ DEBUG: Router planner cannot handle multi-shard select queries -- recurively plan left part of the join, and run a final real-time query SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u INNER JOIN test USING (x) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 45_1 for subquery SELECT x, y FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 45_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1 DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 45_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- (0 rows) -- set operations and the sublink can be recursively planned SELECT * FROM ((SELECT x FROM test) UNION (SELECT x FROM (SELECT x FROM local_test) as foo WHERE x IN (SELECT x FROM test))) u ORDER BY 1; DEBUG: Local tables cannot be used in distributed queries. 
-DEBUG: generating subplan 48_1 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 48_2 for subquery SELECT x FROM recursive_set_local.test +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 48_3 for subquery SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 48_4 for subquery SELECT x FROM recursive_set_local.test +DEBUG: generating subplan XXX_4 for subquery SELECT x FROM recursive_set_local.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 48_5 for subquery SELECT intermediate_result.x FROM read_intermediate_result('48_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.x FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) -DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('48_5'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) u ORDER BY x +DEBUG: generating subplan XXX_5 for subquery SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) u ORDER BY x DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 1 2 (2 rows) SET citus.task_executor_type TO 'task-tracker'; -- repartition is recursively planned before the set operation -(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC; +(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. 
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 2 @@ -360,17 +360,17 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: generating subplan 53_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 2 -DEBUG: generating subplan 53_2 for subquery SELECT x FROM recursive_set_local.local_test +DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.local_test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 53_3 for subquery SELECT x FROM recursive_set_local.test +DEBUG: generating subplan XXX_3 for subquery SELECT x FROM recursive_set_local.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 53_4 for subquery SELECT x FROM recursive_set_local.test -DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('53_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT ((SELECT intermediate_result.x FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('53_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i)) ORDER BY 1 DESC +DEBUG: generating subplan XXX_4 for subquery SELECT x FROM recursive_set_local.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT ((SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i)) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 2 1 (2 rows) diff --git a/src/test/regress/expected/set_operations.out b/src/test/regress/expected/set_operations.out index 9958d2426..a29f711d1 100644 --- a/src/test/regress/expected/set_operations.out +++ b/src/test/regress/expected/set_operations.out @@ -2,23 +2,23 @@ CREATE SCHEMA recursive_union; SET search_path TO recursive_union, public; CREATE TABLE recursive_union.test (x int, y int); SELECT create_distributed_table('test', 'x'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE recursive_union.ref (a int, b int); SELECT create_reference_table('ref'); - create_reference_table 
------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE test_not_colocated (LIKE test); SELECT create_distributed_table('test_not_colocated', 'x', colocate_with := 'none'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test VALUES (1,1), (2,2); @@ -28,14 +28,14 @@ SET client_min_messages TO DEBUG; (SELECT * FROM test) UNION (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 3_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 3_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -43,12 +43,12 @@ DEBUG: Plan is router executable (SELECT * FROM test) UNION (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -57,8 +57,8 @@ DEBUG: Plan is router executable (SELECT * FROM ref) UNION (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 
rows) @@ -66,14 +66,14 @@ DEBUG: Plan is router executable (SELECT * FROM test) UNION ALL (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 9_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 9_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -83,12 +83,12 @@ DEBUG: Plan is router executable (SELECT * FROM test) UNION ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 12_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 2 | 2 @@ -98,8 +98,8 @@ DEBUG: Plan is router executable (SELECT * FROM ref) UNION ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 2 | 2 3 | 3 @@ -109,14 +109,14 @@ DEBUG: Plan is router executable (SELECT * FROM test) INTERSECT (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 15_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router 
planner cannot handle multi-shard select queries -DEBUG: generating subplan 15_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -124,20 +124,20 @@ DEBUG: Plan is router executable (SELECT * FROM test) INTERSECT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 18_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 2 | 2 (1 row) (SELECT * FROM ref) INTERSECT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -145,14 +145,14 @@ DEBUG: Plan is router executable (SELECT * FROM test) INTERSECT ALL (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 21_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 21_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_2'::text, 
'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -160,20 +160,20 @@ DEBUG: Plan is router executable (SELECT * FROM test) INTERSECT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 24_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 2 | 2 (1 row) (SELECT * FROM ref) INTERSECT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -181,65 +181,65 @@ DEBUG: Plan is router executable (SELECT * FROM test) EXCEPT (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 27_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 27_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM 
read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 30_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 (1 row) (SELECT * FROM ref) EXCEPT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT ALL (SELECT * FROM test) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 33_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 33_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 36_1 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 36 query after replacing subqueries and 
CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('36_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 (1 row) (SELECT * FROM ref) EXCEPT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- (0 rows) -- more complex set operation trees are supported @@ -255,15 +255,15 @@ UNION ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 39_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 39_2 for subquery SELECT test.x, ref.a FROM (recursive_union.test LEFT JOIN recursive_union.ref ON ((test.x OPERATOR(pg_catalog.=) ref.a))) -DEBUG: Plan 39 query after replacing subqueries and CTEs: (((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref) UNION ALL SELECT s.s, s.s FROM generate_series(1, 10) s(s)) EXCEPT SELECT 1, 1) UNION SELECT intermediate_result.x, intermediate_result.a FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, a integer) ORDER BY 1, 2 +DEBUG: generating subplan XXX_2 for subquery SELECT test.x, ref.a FROM (recursive_union.test LEFT JOIN recursive_union.ref ON ((test.x OPERATOR(pg_catalog.=) ref.a))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: (((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref) UNION ALL SELECT s.s, s.s FROM generate_series(1, 10) s(s)) EXCEPT SELECT 1, 1) UNION SELECT intermediate_result.x, intermediate_result.a FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, a integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y -----+---- - 1 | + x | y +--------------------------------------------------------------------- + 1 | 2 | 2 3 | 3 4 | 4 @@ -278,8 +278,8 @@ DEBUG: Plan is router executable -- within a subquery, some unions can be pushed down SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -287,17 +287,17 @@ DEBUG: Router planner cannot handle multi-shard select 
queries SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 43_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 43_2 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 43_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) -DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('43_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -305,15 +305,15 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 47_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 47_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('47_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref -DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('47_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, 
intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -322,16 +322,16 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM ref) UNION (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -340,8 +340,8 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT x, y FROM test) UNION ALL (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -351,15 +351,15 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 53_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 53_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref -DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 2 | 2 @@ -369,8 +369,8 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM ref) UNION ALL (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 2 | 2 3 | 3 @@ -380,17 +380,17 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot 
handle multi-shard select queries -DEBUG: generating subplan 57_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 57_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 57_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('57_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -398,17 +398,17 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT x, y FROM test) INTERSECT (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 61_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 61_2 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 61_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('61_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('61_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) -DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('61_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x 
integer, y integer) INTERSECT SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -416,23 +416,23 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 65_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 65_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref -DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('65_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 2 | 2 (1 row) SELECT * FROM ((SELECT * FROM ref) INTERSECT (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -440,70 +440,70 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 69_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 69_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 69_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) 
EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('69_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('69_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) SELECT * FROM ((SELECT x, y FROM test) EXCEPT (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 73_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 73_2 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 73_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('73_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('73_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) -DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('73_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner 
cannot handle multi-shard select queries -DEBUG: generating subplan 77_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 77_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref -DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_2 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM ((SELECT * FROM ref) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b ----+--- + a | b +--------------------------------------------------------------------- (0 rows) -- unions can even be pushed down within a join SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u LEFT JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 2 | 2 | 2 @@ -515,16 +515,16 @@ SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 83_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 83_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 83_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 
83 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -533,13 +533,13 @@ SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test ORDER BY x LIM DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 87_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 87_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 87_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part 
of the outer join @@ -547,16 +547,16 @@ DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer jo SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT y, x FROM test)) u JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 91_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 91_2 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 91_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) -DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -564,16 +564,16 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT 1, 1 FROM test)) u JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 95_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 95_2 for subquery SELECT 1, 1 FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 95_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" 
FROM read_intermediate_result('95_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer) -DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -581,8 +581,8 @@ DEBUG: Router planner cannot handle multi-shard select queries -- a join between a set operation and a generate_series which is pushdownable SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -591,17 +591,17 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 100_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 100_2 for subquery SELECT x, y FROM recursive_union.test ORDER BY x +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test ORDER BY x DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 100_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('100_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('100_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 100 query after replacing subqueries and CTEs: SELECT u.x, u.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('100_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN (SELECT x_1.x FROM generate_series(1, 10) x_1(x)) x USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN (SELECT x_1.x FROM generate_series(1, 10) x_1(x)) x USING (x)) ORDER BY u.x, u.y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) -- subqueries in WHERE clause with set operations fails due to the current limitaions of recursive planning IN WHERE clause @@ -614,11 +614,11 @@ SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) foo WHERE x IN ( DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 4 -DEBUG: generating subplan 105_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4 -DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('105_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x +DEBUG: generating subplan XXX_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -628,19 +628,19 @@ SELECT * FROM ((SELECT x,y FROM test) UNION (SELECT y,x FROM test)) foo WHERE x DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 4 -DEBUG: generating subplan 107_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4 +DEBUG: generating subplan XXX_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 107_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 107_3 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_3 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 107_4 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('107_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, 
intermediate_result.x FROM read_intermediate_result('107_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) -DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('107_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x +DEBUG: generating subplan XXX_4 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -649,19 +649,19 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT x,y FROM test) UNION (SELECT y,x FROM test)) foo WHERE x IN (SELECT y FROM test) ORDER BY 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 112_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 112_2 for subquery SELECT y, x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y, x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 112_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('112_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('112_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 112_4 for subquery SELECT y FROM recursive_union.test -DEBUG: Plan 112 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('112_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT 
intermediate_result.y FROM read_intermediate_result('112_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x +DEBUG: generating subplan XXX_4 for subquery SELECT y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -673,14 +673,14 @@ SELECT x, y, rnk FROM (SELECT *, rank() OVER my_win as rnk FROM test WINDOW my_w ORDER BY 1 DESC, 2 DESC, 3 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 117_1 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) foo +DEBUG: generating subplan XXX_1 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) foo DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 117_2 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) bar -DEBUG: Plan 117 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) UNION SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) ORDER BY 1 DESC, 2 DESC, 3 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) bar +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) UNION SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) ORDER BY 1 DESC, 2 DESC, 3 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x | y | rnk ----+---+----- + x | y | rnk +--------------------------------------------------------------------- 2 | 2 | 1 1 | 1 | 1 (2 rows) @@ -698,16 +698,16 @@ SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x LIMIT DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 122_1 for subquery SELECT x, y FROM 
recursive_union.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 122_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 122_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y ----+---+--- + x | y | y +--------------------------------------------------------------------- 2 | 2 | 2 (1 row) @@ -715,13 +715,13 @@ SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM test ORDER BY x LIM DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 -DEBUG: generating subplan 126_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 126_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 126_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 126 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y +DEBUG: generating subplan XXX_3 for 
subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join @@ -729,20 +729,20 @@ DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer jo SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM ref WHERE a IN (SELECT x FROM test))) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 130_1 for subquery SELECT x FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 130_2 for subquery SELECT a, b FROM recursive_union.ref WHERE (a OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('130_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) +DEBUG: generating subplan XXX_2 for subquery SELECT a, b FROM recursive_union.ref WHERE (a OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 130_3 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_3 for subquery SELECT x, y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 130_4 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('130_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('130_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -DEBUG: Plan 130 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('130_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_4 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan 
DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -750,8 +750,8 @@ DEBUG: Plan is router executable -- subquery union in WHERE clause with partition column equality and implicit join is pushed down SELECT * FROM test a WHERE x IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x FROM test c WHERE y = 2) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -761,33 +761,33 @@ SELECT * FROM test a WHERE x NOT IN (SELECT x FROM test b WHERE y = 1 UNION SELE DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 137_1 for subquery SELECT x FROM recursive_union.test b WHERE (y OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_union.test b WHERE (y OPERATOR(pg_catalog.=) 1) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 137_2 for subquery SELECT x FROM recursive_union.test c WHERE (y OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 137 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('137_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.x FROM read_intermediate_result('137_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test c WHERE (y OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 136_1 for subquery SELECT b.x FROM recursive_union.test b WHERE (b.y OPERATOR(pg_catalog.=) 1) UNION SELECT c.x FROM recursive_union.test c WHERE (c.y OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 136 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (NOT (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('136_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))) ORDER BY x, y +DEBUG: generating subplan XXX_1 for subquery SELECT b.x FROM recursive_union.test b WHERE (b.y OPERATOR(pg_catalog.=) 1) UNION SELECT c.x FROM recursive_union.test c WHERE (c.y OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (NOT (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- (0 rows) -- subquery union in WHERE clause without parition column equality is recursively planned SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard 
select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 140_1 for subquery SELECT x FROM recursive_union.test b +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_union.test b DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 140_2 for subquery SELECT y FROM recursive_union.test c +DEBUG: generating subplan XXX_2 for subquery SELECT y FROM recursive_union.test c DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 140_3 for subquery SELECT intermediate_result.x FROM read_intermediate_result('140_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('140_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer) -DEBUG: Plan 140 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('140_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -796,10 +796,10 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 144_1 for subquery SELECT x FROM recursive_union.test b +DEBUG: generating subplan XXX_1 for subquery SELECT x FROM recursive_union.test b DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries -DEBUG: Plan 144 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('144_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT c.y FROM recursive_union.test c WHERE (a.x OPERATOR(pg_catalog.=) c.x))) ORDER BY x, y +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT c.y FROM recursive_union.test c WHERE (a.x OPERATOR(pg_catalog.=) c.x))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for 
the subquery since it contains references to outer queries @@ -810,18 +810,18 @@ SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test) ORDER BY 1,2 LIMI DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 147_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 147_2 for subquery SELECT x, y FROM recursive_union.test -DEBUG: Plan 147 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('147_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('147_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 LIMIT 5 DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 146_1 for subquery SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test ORDER BY 1, 2 LIMIT 5 -DEBUG: Plan 146 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('146_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo ORDER BY x DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test ORDER BY 1, 2 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo ORDER BY x DESC LIMIT 3 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 2 | 2 1 | 1 (2 rows) @@ -830,34 +830,34 @@ DEBUG: Plan is router executable select count(DISTINCT t.x) FROM ((SELECT DISTINCT x FROM test) UNION (SELECT DISTINCT y FROM test)) as t(x) ORDER BY 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 150_1 for subquery SELECT DISTINCT y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 150_2 for subquery SELECT DISTINCT x FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT x FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 150_3 for 
subquery SELECT intermediate_result.x FROM read_intermediate_result('150_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('150_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) -DEBUG: Plan 150 query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('150_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) t(x) ORDER BY (count(DISTINCT x)) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) t(x) ORDER BY (count(DISTINCT x)) DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 2 (1 row) select count(DISTINCT t.x) FROM ((SELECT count(DISTINCT x) FROM test) UNION (SELECT count(DISTINCT y) FROM test)) as t(x) ORDER BY 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 154_1 for subquery SELECT count(DISTINCT x) AS count FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT count(DISTINCT x) AS count FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 154_2 for subquery SELECT count(DISTINCT y) AS count FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT count(DISTINCT y) AS count FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 154_3 for subquery SELECT intermediate_result.count FROM read_intermediate_result('154_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint) UNION SELECT intermediate_result.count FROM read_intermediate_result('154_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint) -DEBUG: Plan 154 query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.count FROM read_intermediate_result('154_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) t(x) ORDER BY (count(DISTINCT x)) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint) UNION SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) t(x) ORDER BY (count(DISTINCT x)) DEBUG: Creating router plan DEBUG: Plan is router executable - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -865,17 +865,17 @@ DEBUG: Plan is router 
executable select avg(DISTINCT t.x) FROM ((SELECT avg(DISTINCT y) FROM test GROUP BY x) UNION (SELECT avg(DISTINCT y) FROM test GROUP BY x)) as t(x) ORDER BY 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 158_1 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x +DEBUG: generating subplan XXX_1 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 158_2 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x +DEBUG: generating subplan XXX_2 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 158_3 for subquery SELECT intermediate_result.avg FROM read_intermediate_result('158_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric) UNION SELECT intermediate_result.avg FROM read_intermediate_result('158_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric) -DEBUG: Plan 158 query after replacing subqueries and CTEs: SELECT avg(DISTINCT x) AS avg FROM (SELECT intermediate_result.avg FROM read_intermediate_result('158_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) t(x) ORDER BY (avg(DISTINCT x)) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric) UNION SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(DISTINCT x) AS avg FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) t(x) ORDER BY (avg(DISTINCT x)) DEBUG: Creating router plan DEBUG: Plan is router executable - avg ------------------------- + avg +--------------------------------------------------------------------- 1.50000000000000000000 (1 row) @@ -920,14 +920,14 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: generating subplan 164_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 0 +DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 0 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 164_2 for subquery SELECT x FROM recursive_union.test -DEBUG: Plan 164 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('164_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('164_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT 
SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- (0 rows) -- repartition is recursively planned with the set operation @@ -962,14 +962,14 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: generating subplan 167_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) +DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 167_2 for subquery SELECT x FROM recursive_union.test -DEBUG: Plan 167 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('167_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('167_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- 2 1 (2 rows) @@ -980,17 +980,17 @@ CREATE VIEW set_view_recursive AS (SELECT y FROM test) UNION (SELECT y FROM test SELECT * FROM set_view_recursive ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 170_1 for subquery SELECT y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 170_2 for subquery SELECT y FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 170_3 for subquery SELECT intermediate_result.y FROM read_intermediate_result('170_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('170_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer) -DEBUG: Plan 170 query after replacing subqueries and CTEs: SELECT y FROM (SELECT intermediate_result.y FROM read_intermediate_result('170_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive ORDER BY y DESC +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(y 
integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT y FROM (SELECT intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive ORDER BY y DESC DEBUG: Creating router plan DEBUG: Plan is router executable - y ---- + y +--------------------------------------------------------------------- 2 1 (2 rows) @@ -999,8 +999,8 @@ DEBUG: Plan is router executable CREATE VIEW set_view_pushdown AS (SELECT x FROM test) UNION (SELECT x FROM test); SELECT * FROM set_view_pushdown ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - x ---- + x +--------------------------------------------------------------------- 2 1 (2 rows) @@ -1010,16 +1010,16 @@ CREATE VIEW set_view_recursive_second AS SELECT u.x, test.y FROM ((SELECT x, y F SELECT * FROM set_view_recursive_second ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 175_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 175_2 for subquery SELECT 1, 1 FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 175_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('175_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer) -DEBUG: Plan 175 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1028,45 +1028,45 @@ DEBUG: Router planner cannot handle multi-shard select queries ((SELECT x FROM set_view_recursive_second) INTERSECT (SELECT * FROM set_view_recursive)) EXCEPT (SELECT * FROM set_view_pushdown); DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_2 for subquery SELECT 1, 1 FROM recursive_union.test +DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 179_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('179_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('179_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer) +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_4 for subquery SELECT y FROM recursive_union.test +DEBUG: generating subplan XXX_4 for subquery SELECT y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_5 for subquery SELECT y FROM recursive_union.test +DEBUG: generating subplan XXX_5 for subquery SELECT y FROM recursive_union.test DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 179_6 for subquery SELECT intermediate_result.y FROM read_intermediate_result('179_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('179_5'::text, 'binary'::citus_copy_format) intermediate_result(y integer) +DEBUG: generating subplan XXX_6 for subquery SELECT intermediate_result.y FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(y integer) DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_7 for subquery SELECT x FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('179_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second +DEBUG: generating subplan XXX_7 for subquery SELECT x FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 179_8 for subquery SELECT x FROM (SELECT test.x FROM recursive_union.test UNION SELECT test.x FROM recursive_union.test) set_view_pushdown -DEBUG: Plan 179 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('179_7'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT set_view_recursive.y FROM (SELECT intermediate_result.y FROM read_intermediate_result('179_6'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive) EXCEPT SELECT intermediate_result.x FROM read_intermediate_result('179_8'::text, 'binary'::citus_copy_format) intermediate_result(x integer) +DEBUG: generating subplan XXX_8 for subquery SELECT x FROM (SELECT test.x FROM recursive_union.test UNION SELECT test.x FROM recursive_union.test) set_view_pushdown +DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('XXX_7'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT set_view_recursive.y FROM (SELECT intermediate_result.y FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive) EXCEPT SELECT intermediate_result.x FROM read_intermediate_result('XXX_8'::text, 'binary'::citus_copy_format) intermediate_result(x integer) DEBUG: Creating router plan DEBUG: Plan is router executable - x ---- + x +--------------------------------------------------------------------- (0 rows) -- 
queries on non-colocated tables that would push down if they were not colocated are recursivelu planned SELECT * FROM (SELECT * FROM test UNION SELECT * FROM test_not_colocated) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 188_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 188_2 for subquery SELECT x, y FROM recursive_union.test_not_colocated +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test_not_colocated DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 188_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('188_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('188_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 188 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('188_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1074,17 +1074,17 @@ DEBUG: Plan is router executable SELECT * FROM (SELECT * FROM test UNION ALL SELECT * FROM test_not_colocated) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 192_1 for subquery SELECT x, y FROM recursive_union.test +DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 192_2 for subquery SELECT x, y FROM recursive_union.test_not_colocated +DEBUG: generating subplan XXX_2 for subquery SELECT x, y FROM recursive_union.test_not_colocated DEBUG: Creating router plan DEBUG: Plan is router executable -DEBUG: generating subplan 192_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('192_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('192_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) -DEBUG: Plan 192 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, 
intermediate_result.y FROM read_intermediate_result('192_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y ----+--- + x | y +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) diff --git a/src/test/regress/expected/single_hash_repartition_join.out b/src/test/regress/expected/single_hash_repartition_join.out index 2f6e88b88..3e81abb7c 100644 --- a/src/test/regress/expected/single_hash_repartition_join.out +++ b/src/test/regress/expected/single_hash_repartition_join.out @@ -9,28 +9,28 @@ CREATE TABLE single_hash_repartition_second (id int, sum int, avg float); CREATE TABLE ref_table (id int, sum int, avg float); SET citus.shard_count TO 4; SELECT create_distributed_table('single_hash_repartition_first', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('ref_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SET citus.log_multi_join_order TO ON; SET client_min_messages TO DEBUG2; -- a very basic single hash re-partitioning example -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_second t2 WHERE @@ -60,8 +60,8 @@ DETAIL: Creating dependency on merge taskId 20 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- the same query with the orders of the tables have changed -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_second t1, single_hash_repartition_first t2 WHERE @@ -122,8 +122,8 @@ DETAIL: Creating dependency on merge taskId 20 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- a more complicated join order, first colocated join, later single hash repartition join -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3 WHERE @@ -165,8 +165,8 @@ DETAIL: Creating dependency on merge taskId 20 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- a more complicated join order, first hash-repartition join, later single hash repartition join -EXPLAIN 
SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3 WHERE @@ -224,8 +224,8 @@ DETAIL: Creating dependency on merge taskId 20 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- single hash repartitioning is not supported between different column types -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3 WHERE @@ -248,9 +248,9 @@ ERROR: cannot perform distributed planning on this query DETAIL: Cartesian products are currently unsupported -- single repartition query in CTE -- should work fine -EXPLAIN WITH cte1 AS +EXPLAIN WITH cte1 AS ( - SELECT + SELECT t1.id * t2.avg as data FROM single_hash_repartition_first t1, single_hash_repartition_second t2 @@ -267,7 +267,7 @@ FROM WHERE cte1.data > single_hash_repartition_first.id; DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan 7_1 for CTE cte1: SELECT ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) AS data FROM single_hash_repartition.single_hash_repartition_first t1, single_hash_repartition.single_hash_repartition_second t2 WHERE ((t1.id OPERATOR(pg_catalog.=) t2.sum) AND (t1.sum OPERATOR(pg_catalog.>) 5000)) ORDER BY ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) DESC LIMIT 50 +DEBUG: generating subplan XXX_1 for CTE cte1: SELECT ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) AS data FROM single_hash_repartition.single_hash_repartition_first t1, single_hash_repartition.single_hash_repartition_second t2 WHERE ((t1.id OPERATOR(pg_catalog.=) t2.sum) AND (t1.sum OPERATOR(pg_catalog.>) 5000)) ORDER BY ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) DESC LIMIT 50 DEBUG: Router planner cannot handle multi-shard select queries LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ] DEBUG: push down of limit count: 50 @@ -294,7 +294,7 @@ DETAIL: Creating dependency on merge taskId 20 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- two single repartitions -EXPLAIN SELECT +EXPLAIN SELECT count(*) FROM single_hash_repartition_first t1, single_hash_repartition_second t2, single_hash_repartition_second t3 @@ -344,9 +344,9 @@ DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 24 ERROR: the query contains a join that requires repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning --- two single repartitions again, but this +-- two single repartitions again, but this -- time the columns of the second join is reverted -EXPLAIN SELECT +EXPLAIN SELECT avg(t1.avg + t2.avg) FROM single_hash_repartition_first t1, single_hash_repartition_second t2, single_hash_repartition_second t3 @@ -404,8 +404,8 @@ HINT: Set citus.enable_repartition_joins to on to enable repartitioning -- the following queries should also be a single hash repartition queries -- note that since we've manually updated the metadata without changing the -- the corresponding data, the results of the query would be wrong -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_second t2 WHERE @@ -437,8 +437,8 @@ HINT: Set citus.enable_repartition_joins to on to enable 
repartitioning -- the following queries should also be a single hash repartition queries -- note that since we've manually updated the metadata without changing the -- the corresponding data, the results of the query would be wrong -EXPLAIN SELECT - count(*) +EXPLAIN SELECT + count(*) FROM single_hash_repartition_first t1, single_hash_repartition_second t2 WHERE diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out index 3da4f0386..ac7f878eb 100644 --- a/src/test/regress/expected/sql_procedure.out +++ b/src/test/regress/expected/sql_procedure.out @@ -9,9 +9,9 @@ SET SEARCH_PATH = procedure_schema; CREATE TABLE test_table(id integer , org_id integer); CREATE UNIQUE INDEX idx_table ON test_table(id, org_id); SELECT create_distributed_table('test_table','id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_table VALUES(1, 1); @@ -22,8 +22,8 @@ CREATE PROCEDURE test_procedure_delete_insert(id int, org_id int) LANGUAGE SQL A $$; CALL test_procedure_delete_insert(2,3); SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -40,8 +40,8 @@ CALL test_procedure_commit(2,5); ERROR: COMMIT is not allowed in a SQL function CONTEXT: SQL function "test_procedure_commit" during startup SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -55,8 +55,8 @@ CALL test_procedure_rollback(2,15); ERROR: ROLLBACK is not allowed in a SQL function CONTEXT: SQL function "test_procedure_rollback" during startup SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -73,8 +73,8 @@ END; $$; CALL test_procedure_delete_insert(2,3); SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -90,12 +90,12 @@ $$; CALL test_procedure_modify_insert(2,12); ERROR: duplicate key value violates unique constraint "idx_table_100503" DETAIL: Key (id, org_id)=(2, 12) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 12 (1 row) @@ -110,12 +110,12 @@ $$; CALL test_procedure_modify_insert_commit(2,30); ERROR: duplicate key value violates unique constraint "idx_table_100503" DETAIL: Key (id, org_id)=(2, 30) already exists. 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 2 | 30 (1 row) @@ -130,8 +130,8 @@ END; $$; CALL test_procedure_rollback(2,5); SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- (0 rows) -- rollback is successfull when insert is on multiple rows @@ -145,8 +145,8 @@ END; $$; CALL test_procedure_rollback_2(12, 15); SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- (0 rows) -- delete is rolled back, update is committed @@ -161,8 +161,8 @@ $$; INSERT INTO test_table VALUES (1, 1), (2, 2); CALL test_procedure_rollback_3(2,15); SELECT * FROM test_table ORDER BY 1, 2; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- 1 | 1 2 | 15 (2 rows) @@ -190,15 +190,15 @@ BEGIN END; $$; SELECT * from test_table; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- (0 rows) call test_procedure(1,1); call test_procedure(20, 20); SELECT * from test_table; - id | org_id -----+-------- + id | org_id +--------------------------------------------------------------------- (0 rows) \set VERBOSITY terse diff --git a/src/test/regress/expected/ssl_by_default.out b/src/test/regress/expected/ssl_by_default.out index e05686368..d75bc1b28 100644 --- a/src/test/regress/expected/ssl_by_default.out +++ b/src/test/regress/expected/ssl_by_default.out @@ -6,37 +6,37 @@ -- ssl can only be enabled by default on installations that are OpenSSL-enabled. 
SHOW ssl_ciphers \gset SELECT :'ssl_ciphers' != 'none' AS hasssl; - hasssl --------- + hasssl +--------------------------------------------------------------------- t (1 row) SHOW ssl; - ssl ------ + ssl +--------------------------------------------------------------------- on (1 row) SELECT run_command_on_workers($$ SHOW ssl; $$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,on) (localhost,57638,t,on) (2 rows) SHOW citus.node_conninfo; - citus.node_conninfo ---------------------- + citus.node_conninfo +--------------------------------------------------------------------- sslmode=require (1 row) SELECT run_command_on_workers($$ SHOW citus.node_conninfo; $$); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,sslmode=require) (localhost,57638,t,sslmode=require) (2 rows) @@ -44,23 +44,23 @@ $$); SELECT run_command_on_workers($$ SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid(); $$); - run_command_on_workers ------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) SHOW ssl_ciphers; - ssl_ciphers ----------------------------- + ssl_ciphers +--------------------------------------------------------------------- TLSv1.2+HIGH:!aNULL:!eNULL (1 row) SELECT run_command_on_workers($$ SHOW ssl_ciphers; $$); - run_command_on_workers ------------------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,TLSv1.2+HIGH:!aNULL:!eNULL) (localhost,57638,t,TLSv1.2+HIGH:!aNULL:!eNULL) (2 rows) diff --git a/src/test/regress/expected/subqueries_deep.out b/src/test/regress/expected/subqueries_deep.out index e4a3b2ba2..c270018c4 100644 --- a/src/test/regress/expected/subqueries_deep.out +++ b/src/test/regress/expected/subqueries_deep.out @@ -7,39 +7,39 @@ SET client_min_messages TO DEBUG1; -- subquery in FROM -> FROM -> FROM should be replaced due to OFFSET -- one level up subquery should be replaced due to GROUP BY on non partition key -- one level up subquery should be replaced due to LIMUT -SELECT - DISTINCT user_id -FROM +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT avg(event_type) as avg_val FROM (SELECT - event_type, users_table.user_id - FROM + event_type, users_table.user_id + FROM users_table, (SELECT user_id, event_type FROM events_table WHERE value_2 < 3 ORDER BY 1, 2 OFFSET 3) as foo - WHERE - foo.user_id = users_table.user_id) bar, users_table - WHERE - bar.user_id = users_table.user_id - GROUP BY + WHERE + foo.user_id = users_table.user_id) bar, users_table + WHERE + bar.user_id = users_table.user_id + GROUP BY users_table.value_1 ) as baz - WHERE + WHERE baz.avg_val < users_table.user_id ORDER BY 1 LIMIT 3 ) as sub1 ORDER BY 1 DESC; -DEBUG: generating subplan 1_1 for subquery SELECT user_id, event_type FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.<) 3) ORDER BY user_id, event_type OFFSET 3 -DEBUG: generating subplan 1_2 for subquery SELECT avg(bar.event_type) AS avg_val FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type 
FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar, public.users_table WHERE (bar.user_id OPERATOR(pg_catalog.=) users_table.user_id) GROUP BY users_table.value_1 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, event_type FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.<) 3) ORDER BY user_id, event_type OFFSET 3 +DEBUG: generating subplan XXX_2 for subquery SELECT avg(bar.event_type) AS avg_val FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar, public.users_table WHERE (bar.user_id OPERATOR(pg_catalog.=) users_table.user_id) GROUP BY users_table.value_1 DEBUG: push down of limit count: 3 -DEBUG: generating subplan 1_3 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT intermediate_result.avg_val FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(avg_val numeric)) baz WHERE (baz.avg_val OPERATOR(pg_catalog.<) (users_table.user_id)::numeric) ORDER BY users_table.user_id LIMIT 3 -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub1 ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_3 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT intermediate_result.avg_val FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg_val numeric)) baz WHERE (baz.avg_val OPERATOR(pg_catalog.<) (users_table.user_id)::numeric) ORDER BY users_table.user_id LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub1 ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 3 (1 row) @@ -51,18 +51,18 @@ SELECT event, array_length(events_table, 1) FROM ( SELECT event, array_agg(t.user_id) AS events_table FROM ( - SELECT + SELECT DISTINCT ON(e.event_type::text) e.event_type::text as event, e.time, e.user_id - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -73,14 +73,14 @@ FROM ( GROUP BY event ) q ORDER BY 2 DESC, 1; -DEBUG: generating subplan 5_1 for CTE cte: SELECT count(*) AS count FROM public.users_table +DEBUG: generating subplan XXX_1 for CTE cte: SELECT count(*) AS count FROM public.users_table DEBUG: push down of limit count: 5 -DEBUG: generating subplan 5_2 for subquery SELECT user_id FROM 
public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id))))) AND (EXISTS (SELECT cte.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte))) LIMIT 5 -DEBUG: generating subplan 5_3 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) -DEBUG: generating subplan 5_4 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event - event | array_length --------+-------------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id))))) AND (EXISTS (SELECT cte.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte))) LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM 
read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) +DEBUG: generating subplan XXX_4 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event + event | array_length +--------------------------------------------------------------------- 3 | 26 4 | 21 2 | 18 @@ -89,32 +89,32 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT event, array_le 5 | 9 (6 rows) --- this test probably doesn't add too much value, +-- this test probably doesn't add too much value, -- but recurse 6 times for fun SELECT count(*) FROM ( - SELECT avg(min) FROM + SELECT avg(min) FROM ( SELECT min(users_table.value_1) FROM ( - SELECT avg(event_type) as avg_ev_type FROM + SELECT avg(event_type) as avg_ev_type FROM ( - SELECT - max(value_1) as mx_val_1 + SELECT + max(value_1) as mx_val_1 FROM ( - SELECT + SELECT avg(event_type) as avg FROM ( - SELECT - cnt - FROM + SELECT + cnt + FROM (SELECT count(*) as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table - WHERE + WHERE users_table.user_id = level_1.cnt ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt + WHERE events_table.user_id = level_2.cnt GROUP BY level_2.cnt ) as level_3, users_table WHERE user_id = level_3.avg @@ -123,57 +123,57 @@ FROM WHERE level_4.mx_val_1 = events_table.user_id GROUP BY level_4.mx_val_1 ) as level_5, users_table - WHERE + WHERE level_5.avg_ev_type = users_table.user_id - GROUP BY + GROUP BY level_5.avg_ev_type ) as level_6, users_table WHERE users_table.user_id = level_6.min GROUP BY users_table.value_1 ) as bar; -DEBUG: generating subplan 10_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 -DEBUG: generating subplan 10_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt -DEBUG: generating subplan 10_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg -DEBUG: generating subplan 10_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) 
intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 10_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('10_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 10_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('10_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('10_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 +DEBUG: generating subplan XXX_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type +DEBUG: generating subplan XXX_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar + count +--------------------------------------------------------------------- 0 (1 row) -- 
same query happening in the subqueries in WHERE --- this test probably doesn't add too much value, +-- this test probably doesn't add too much value, -- but recurse 6 times for fun -SELECT - * -FROM - users_table +SELECT + * +FROM + users_table WHERE user_id IN ( SELECT count(*) FROM ( - SELECT avg(min) FROM + SELECT avg(min) FROM ( SELECT min(users_table.value_1) FROM ( - SELECT avg(event_type) as avg_ev_type FROM + SELECT avg(event_type) as avg_ev_type FROM ( - SELECT - max(value_1) as mx_val_1 + SELECT + max(value_1) as mx_val_1 FROM ( - SELECT + SELECT avg(event_type) as avg FROM ( - SELECT - cnt - FROM + SELECT + cnt + FROM (SELECT count(*) as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table - WHERE + WHERE users_table.user_id = level_1.cnt ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt + WHERE events_table.user_id = level_2.cnt GROUP BY level_2.cnt ) as level_3, users_table WHERE user_id = level_3.avg @@ -182,23 +182,23 @@ WHERE user_id IN ( WHERE level_4.mx_val_1 = events_table.user_id GROUP BY level_4.mx_val_1 ) as level_5, users_table - WHERE + WHERE level_5.avg_ev_type = users_table.user_id - GROUP BY + GROUP BY level_5.avg_ev_type ) as level_6, users_table WHERE users_table.user_id = level_6.min GROUP BY users_table.value_1 ) as bar); -DEBUG: generating subplan 17_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 -DEBUG: generating subplan 17_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt -DEBUG: generating subplan 17_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg -DEBUG: generating subplan 17_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('17_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 17_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('17_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 17_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('17_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 -DEBUG: generating subplan 17_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('17_6'::text, 
'binary'::citus_copy_format) intermediate_result(avg numeric)) bar -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('17_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 +DEBUG: generating subplan XXX_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type +DEBUG: generating subplan XXX_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 +DEBUG: generating subplan XXX_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('XXX_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/subqueries_not_supported.out b/src/test/regress/expected/subqueries_not_supported.out index 3eab8552a..734d801fa 100644 --- a/src/test/regress/expected/subqueries_not_supported.out +++ 
b/src/test/regress/expected/subqueries_not_supported.out @@ -6,35 +6,33 @@ SET search_path TO not_supported, public; SET client_min_messages TO DEBUG1; CREATE TABLE users_table_local AS SELECT * FROM users_table; -- we don't support subqueries with local tables when they are not leaf queries -SELECT - * +SELECT + * FROM ( - SELECT - users_table_local.user_id - FROM + SELECT + users_table_local.user_id + FROM users_table_local, (SELECT user_id FROM events_table) as evs WHERE users_table_local.user_id = evs.user_id ) as foo; ERROR: relation users_table_local is not distributed RESET client_min_messages; -- we don't support subqueries with local tables when they are not leaf queries -SELECT user_id FROM users_table WHERE user_id IN - (SELECT - user_id - FROM - users_table_local JOIN (SELECT user_id FROM events_table_local) as foo +SELECT user_id FROM users_table WHERE user_id IN + (SELECT + user_id + FROM + users_table_local JOIN (SELECT user_id FROM events_table_local) as foo USING (user_id) ); ERROR: relation "events_table_local" does not exist -LINE 5: users_table_local JOIN (SELECT user_id FROM events_table_... - ^ SET client_min_messages TO DEBUG1; -- we don't support aggregate distinct if the group by is not on partition key, expect for count distinct -- thus baz and bar are recursively planned but not foo -SELECT - * -FROM +SELECT + * +FROM ( SELECT avg(DISTINCT value_1), random() FROM users_table GROUP BY user_id OFFSET 3 ) as baz, @@ -44,21 +42,21 @@ FROM ( SELECT avg(DISTINCT value_1), random() FROM users_table GROUP BY value_2 OFFSET 3 ) as foo; -DEBUG: generating subplan 4_1 for subquery SELECT avg(DISTINCT value_1) AS avg, random() AS random FROM public.users_table GROUP BY user_id OFFSET 3 -DEBUG: generating subplan 4_2 for subquery SELECT count(DISTINCT value_1) AS count, random() AS random FROM public.users_table GROUP BY value_2 OFFSET 3 +DEBUG: generating subplan XXX_1 for subquery SELECT avg(DISTINCT value_1) AS avg, random() AS random FROM public.users_table GROUP BY user_id OFFSET 3 +DEBUG: generating subplan XXX_2 for subquery SELECT count(DISTINCT value_1) AS count, random() AS random FROM public.users_table GROUP BY value_2 OFFSET 3 ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) -- we don't support array_aggs with ORDER BYs -SELECT - * +SELECT + * FROM ( - SELECT - array_agg(users_table.value_2 ORDER BY users_table.time) - FROM + SELECT + array_agg(users_table.value_2 ORDER BY users_table.time) + FROM users_table, (SELECT user_id FROM events_table) as evs WHERE users_table.user_id = evs.user_id - GROUP BY users_table.value_2 + GROUP BY users_table.value_2 LIMIT 5 ) as foo; ERROR: array_agg with order by is unsupported @@ -67,25 +65,25 @@ SET citus.enable_router_execution TO false; SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 10_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 10 query after 
replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY user_id DESC +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY user_id DESC ERROR: cannot handle complex subqueries when the router executor is disabled SET citus.enable_router_execution TO true; -- window functions are not allowed if they're not partitioned on the distribution column -SELECT - * -FROM +SELECT + * +FROM ( SELECT user_id, time, rnk @@ -103,35 +101,35 @@ LIMIT 10) as foo; ERROR: could not run distributed query because the window function that is used cannot be pushed down HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column --- OUTER JOINs where the outer part is recursively planned and not the other way +-- OUTER JOINs where the outer part is recursively planned and not the other way -- around is not supported SELECT foo.value_2 FROM - (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) LIMIT 5) as foo + (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) LIMIT 5) as foo LEFT JOIN (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar ON(foo.value_2 = bar.value_2); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 14_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) LIMIT 5 -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT foo.value_2 FROM ((SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo LEFT JOIN (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar ON ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2))) +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2 FROM ((SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo LEFT JOIN (SELECT users_table.value_2 FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar ON ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2))) ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join -- Aggregates in subquery without partition column can be planned recursively -- unless there is a reference to an outer query SELECT - * + * FROM - users_table + users_table WHERE - user_id IN + user_id IN ( SELECT - SUM(events_table.user_id) + SUM(events_table.user_id) FROM - events_table + events_table WHERE - users_table.user_id = events_table.user_id + users_table.user_id = events_table.user_id ) ; ERROR: cannot push down this subquery @@ -139,20 +137,20 @@ DETAIL: Aggregates without group by are currently unsupported when a subquery r -- Having qual without group by on partition column can be planned recursively -- unless there is a reference to an outer query SELECT - * + * FROM - users_table + users_table WHERE - user_id IN + user_id IN ( SELECT - SUM(events_table.user_id) + SUM(events_table.user_id) FROM - events_table + events_table WHERE - events_table.user_id = users_table.user_id + events_table.user_id = users_table.user_id HAVING - MIN(value_2) > 2 + MIN(value_2) > 2 ) ; ERROR: cannot push down this subquery diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index 1699905ff..ea6b28828 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -5,9 +5,9 @@ SET search_path TO subquery_and_ctes; CREATE TABLE users_table_local AS SELECT * FROM users_table; CREATE TABLE dist_table (id int, value int); SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO dist_table (id, value) VALUES(1, 2),(2, 3),(3,4); @@ -40,15 +40,15 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -DEBUG: generating subplan 3_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 4_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 4_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) + count +--------------------------------------------------------------------- 1644 (1 row) @@ -62,8 +62,8 @@ SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type FROM cte1, cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5; - user_id | value_1 | user_id | event_type ----------+---------+---------+------------ 
+ user_id | value_1 | user_id | event_type +--------------------------------------------------------------------- 1 | 1 | 1 | 0 1 | 1 | 1 | 0 1 | 1 | 1 | 1 @@ -81,11 +81,11 @@ SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM cte1, cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5; -DEBUG: generating subplan 8_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 8_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6) -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5 - user_id | value_1 | user_id | user_id ----------+---------+---------+--------- +DEBUG: generating subplan XXX_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5 + user_id | value_1 | user_id | user_id +--------------------------------------------------------------------- 1 | 1 | 6 | 6 1 | 1 | 6 | 6 1 | 1 | 6 | 6 @@ -115,15 +115,15 @@ WITH cte1 AS ( ) UPDATE dist_table dt SET value = cte1.value_1 + cte2.event_type FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1; -DEBUG: generating subplan 13_1 for CTE cte1: 
SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan 13_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6) -DEBUG: Plan 13 query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = (cte1.value_1 OPERATOR(pg_catalog.+) cte2.event_type) FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 WHERE ((cte1.user_id OPERATOR(pg_catalog.=) dt.id) AND (dt.id OPERATOR(pg_catalog.=) 1)) +DEBUG: generating subplan XXX_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = (cte1.value_1 OPERATOR(pg_catalog.+) cte2.event_type) FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 WHERE ((cte1.user_id OPERATOR(pg_catalog.=) dt.id) AND (dt.id OPERATOR(pg_catalog.=) 1)) -- volatile function calls should not be routed WITH cte1 AS (SELECT id, value FROM func()) UPDATE dist_table dt SET value = cte1.value FROM cte1 WHERE dt.id = 1; -DEBUG: generating subplan 16_1 for CTE cte1: SELECT id, value FROM subquery_and_ctes.func() func(id, value) -DEBUG: Plan 16 query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = cte1.value FROM (SELECT intermediate_result.id, intermediate_result.value FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) cte1 WHERE (dt.id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_1 for CTE cte1: SELECT id, value FROM 
subquery_and_ctes.func() func(id, value) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = cte1.value FROM (SELECT intermediate_result.id, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) cte1 WHERE (dt.id OPERATOR(pg_catalog.=) 1) -- CTEs are recursively planned, and subquery foo is also recursively planned -- final plan becomes a real-time plan since we also have events_table in the -- range table entries @@ -150,15 +150,15 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo, events_table WHERE foo.user_id = cte.user_id AND events_table.user_id = cte.user_id; -DEBUG: generating subplan 17_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 18_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 18_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) 
dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 17_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id)) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id)) + count +--------------------------------------------------------------------- 30608 (1 row) @@ -180,15 +180,15 @@ WHERE users_table.user_id = cte.user_id AND users_table.user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5) ORDER BY 1 DESC; -DEBUG: generating subplan 21_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 22_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 22_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('22_2'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 21_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 -DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC + user_id +--------------------------------------------------------------------- 4 3 2 @@ -211,14 +211,14 @@ FROM cte WHERE cte.user_id IN (SELECT DISTINCT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 20) ORDER BY 1 DESC; -DEBUG: generating subplan 25_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", 
users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 26_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 26_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) -DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table 
WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -244,10 +244,10 @@ FROM ) SELECT * FROM cte ORDER BY 1 DESC ) as foo ORDER BY 1 DESC; -DEBUG: generating subplan 29_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -283,10 +283,10 @@ FROM ) as bar WHERE foo.user_id = bar.user_id ORDER BY 1 DESC; -DEBUG: generating subplan 31_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -335,13 +335,13 @@ FROM ) as bar WHERE foo.user_id = bar.user_id ORDER BY 1 DESC LIMIT 5; -DEBUG: generating subplan 33_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) -DEBUG: generating subplan 33_2 for CTE cte: SELECT events_table.event_type, users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.value_1 OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) +DEBUG: generating subplan XXX_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) +DEBUG: generating subplan XXX_2 for CTE cte: SELECT events_table.event_type, users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.value_1 OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) DEBUG: push down of limit count: 2 -DEBUG: generating subplan 33_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2 -DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('33_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5 - user_id ---------- +DEBUG: generating subplan XXX_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type 
DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5 + user_id +--------------------------------------------------------------------- 1 (1 row) @@ -372,22 +372,22 @@ SELECT * FROM foo.user_id = events_table.value_2 ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; -DEBUG: generating subplan 37_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 38_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 38_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 37_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT 
intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 37_3 for subquery SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('37_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC -DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('37_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5 DEBUG: push down of limit count: 5 - user_id | user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------+---------------------------------+------------+---------+---------+--------- - 4 | 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | - 2 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | - 4 | 3 | Thu Nov 23 18:08:26.550729 2017 
| 2 | 4 | 3 | - 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | - 1 | 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | + user_id | user_id | time | event_type | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 4 | 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | + 2 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | + 4 | 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | + 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | + 1 | 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | (5 rows) -- now recursively plan subqueries inside the CTEs that contains LIMIT and OFFSET @@ -421,19 +421,19 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -DEBUG: generating subplan 42_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 43_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 43_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3))) DEBUG: push down of limit count: 3 -DEBUG: generating subplan 44_1 for 
subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3 -DEBUG: generating subplan 44_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0 -DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('44_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) -DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 42_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) + count +--------------------------------------------------------------------- 432 (1 row) @@ -475,26 +475,26 @@ FROM ) as foo, users_table WHERE foo.cnt > users_table.value_2 ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; -DEBUG: generating subplan 48_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 49_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local -DEBUG: generating subplan 49_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", 
value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3))) DEBUG: push down of limit count: 3 -DEBUG: generating subplan 50_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3 -DEBUG: generating subplan 50_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0 -DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) -DEBUG: Plan 49 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('49_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('49_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id 
OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 48_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: generating subplan 48_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) -DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 - cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- - 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | - 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | + cnt | user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 
1 | + 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | (5 rows) -- recursive CTES are not supported inside subqueries as well diff --git a/src/test/regress/expected/subquery_basics.out b/src/test/regress/expected/subquery_basics.out index 2062ceaf3..d43a48f0b 100644 --- a/src/test/regress/expected/subquery_basics.out +++ b/src/test/regress/expected/subquery_basics.out @@ -6,21 +6,21 @@ SET client_min_messages TO DEBUG1; SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 1_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -33,20 +33,20 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT user_id FROM (S SELECT * FROM - (SELECT - DISTINCT users_table.value_1 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.value_1 + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 ) as foo ORDER BY 1 DESC; -DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY value_1 DESC - value_1 ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY value_1 DESC + value_1 +--------------------------------------------------------------------- 5 4 3 @@ -60,21 +60,21 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (S SELECT * FROM - (SELECT + (SELECT users_table.value_2, avg(value_1) - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo ORDER BY 2 DESC, 1; -DEBUG: generating subplan 5_1 for subquery SELECT users_table.value_2, avg(users_table.value_1) AS avg FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT value_2, avg FROM (SELECT intermediate_result.value_2, intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, avg numeric)) foo ORDER BY avg DESC, value_2 - value_2 | avg ----------+-------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2, avg(users_table.value_1) AS avg FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_2, avg FROM (SELECT intermediate_result.value_2, intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, avg numeric)) foo ORDER BY avg DESC, value_2 + value_2 | avg +--------------------------------------------------------------------- 4 | 2.8453608247422680 2 | 2.6833855799373041 5 | 2.6238938053097345 @@ -87,10 +87,10 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT value_2, avg FR SELECT * FROM - (SELECT + (SELECT events_table.value_2 - FROM - events_table + FROM + events_table WHERE event_type IN (1,2,3,4) ORDER BY 1 DESC @@ -102,10 +102,10 @@ FROM WHERE foo.value_2 = bar.i ORDER BY 2 DESC, 1; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 7_1 for subquery SELECT value_2 FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) ORDER BY value_2 DESC LIMIT 5 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.i FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT i.i FROM generate_series(0, 100) i(i)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.i) ORDER BY bar.i DESC, foo.value_2 - value_2 | i ----------+--- +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) ORDER BY value_2 DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2, bar.i FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 
integer)) foo, (SELECT i.i FROM generate_series(0, 100) i(i)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.i) ORDER BY bar.i DESC, foo.value_2 + value_2 | i +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 @@ -117,17 +117,17 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT foo.value_2, ba SELECT * FROM - (SELECT + (SELECT count(*) - FROM - events_table + FROM + events_table WHERE event_type IN (1,2,3,4) ) as foo; -DEBUG: generating subplan 9_1 for subquery SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) foo - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) foo + count +--------------------------------------------------------------------- 87 (1 row) @@ -135,19 +135,19 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count FROM (SEL SELECT * FROM - (SELECT - SUM(events_table.user_id) - FROM - events_table + (SELECT + SUM(events_table.user_id) + FROM + events_table WHERE event_type IN (1,2,3,4) - HAVING + HAVING MIN(value_2) > 2 ) as foo; -DEBUG: generating subplan 11_1 for subquery SELECT sum(user_id) AS sum FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) HAVING (min(value_2) OPERATOR(pg_catalog.>) 2) -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT sum FROM (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint)) foo - sum ------ +DEBUG: generating subplan XXX_1 for subquery SELECT sum(user_id) AS sum FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) HAVING (min(value_2) OPERATOR(pg_catalog.>) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum FROM (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint)) foo + sum +--------------------------------------------------------------------- (0 rows) -- multiple subqueries in FROM clause should be replaced @@ -155,33 +155,33 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT sum FROM (SELE SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.value_3 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) GROUP BY users_table.value_3 ORDER BY 1 DESC ) as bar WHERE foo.value_2 = bar.value_3 ORDER BY 2 DESC, 1; -DEBUG: generating subplan 13_1 for subquery SELECT users_table.value_2 FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC -DEBUG: generating subplan 13_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2 - value_2 | value_3 ----------+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2 + value_2 | value_3 +--------------------------------------------------------------------- 5 | 5 4 | 4 3 | 3 @@ -194,33 +194,33 @@ DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.value_2, b SELECT DISTINCT ON (citus) citus, postgres, citus + 1 as c1, postgres-1 as p1 FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo(postgres), - (SELECT + (SELECT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ORDER BY 1 DESC ) as bar (citus) WHERE foo.postgres = bar.citus ORDER BY 1 DESC, 2 DESC LIMIT 3; -DEBUG: generating subplan 16_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY 
users_table.value_2 ORDER BY users_table.value_2 DESC -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3 DEBUG: push down of limit count: 3 - citus | postgres | c1 | p1 --------+----------+----+---- + citus | postgres | c1 | p1 +--------------------------------------------------------------------- 5 | 5 | 6 | 4 4 | 4 | 5 | 3 3 | 3 | 4 | 2 @@ -231,33 +231,33 @@ DEBUG: push down of limit count: 3 SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ORDER BY 1 DESC ) as bar WHERE foo.value_2 = bar.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; -DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) 
AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 DEBUG: push down of limit count: 3 - value_2 | user_id ----------+--------- + value_2 | user_id +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 @@ -266,14 +266,14 @@ DEBUG: push down of limit count: 3 -- subqueries in WHERE should be replaced SELECT DISTINCT user_id FROM users_table -WHERE +WHERE user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5) ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 4 3 2 @@ -281,29 +281,29 @@ DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT user_ (4 rows) -- subquery in FROM -> FROM -> FROM should be replaced due to OFFSET -SELECT - DISTINCT user_id -FROM +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT event_type, user_id FROM - (SELECT event_type, users_table.user_id FROM users_table, + (SELECT event_type, users_table.user_id FROM users_table, (SELECT user_id, event_type FROM events_table WHERE value_2 < 3 OFFSET 3) as foo WHERE foo.user_id = users_table.user_id ) bar ) as baz WHERE baz.user_id = users_table.user_id ) as sub1 - ORDER BY 1 DESC + ORDER BY 
1 DESC LIMIT 3; -DEBUG: generating subplan 22_1 for subquery SELECT user_id, event_type FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3 -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, event_type FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -314,18 +314,18 @@ SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( - SELECT + SELECT u.user_id, e.event_type::text AS event, e.time - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -336,18 +336,18 @@ FROM ( ) q ORDER BY 2 DESC, 1; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 24_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 -DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS 
events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id - user_id | array_length ----------+-------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id + user_id | array_length +--------------------------------------------------------------------- 5 | 364 (1 row) -- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to LIMIT -SELECT - user_id -FROM - users_table +SELECT + user_id +FROM + users_table WHERE user_id IN ( @@ -414,10 +414,10 @@ GROUP BY user_id HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; DEBUG: push down of limit count: 10 -DEBUG: generating subplan 26_1 for subquery SELECT user_id, count(*) AS count_pay FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) 
events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, count(*) AS count_pay FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id + user_id +--------------------------------------------------------------------- 2 3 (2 rows) @@ -437,10 +437,10 @@ FROM LIMIT 5 ) as foo WHERE user_id IN (SELECT count(*) FROM users_table GROUP BY user_id); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT events_table.user_id FROM public.events_table) evs WHERE (users_table.user_id OPERATOR(pg_catalog.=) evs.user_id) ORDER BY users_table.user_id LIMIT 5 -DEBUG: generating subplan 28_2 for subquery SELECT count(*) AS count FROM public.users_table GROUP BY user_id -DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('28_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT events_table.user_id FROM public.events_table) evs WHERE (users_table.user_id OPERATOR(pg_catalog.=) evs.user_id) ORDER BY users_table.user_id LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count FROM public.users_table GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) + user_id +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/subquery_complex_target_list.out b/src/test/regress/expected/subquery_complex_target_list.out index 582d98506..8090384f7 100644 --- a/src/test/regress/expected/subquery_complex_target_list.out +++ b/src/test/regress/expected/subquery_complex_target_list.out @@ -17,10 +17,10 @@ GROUP BY ORDER BY 1 DESC, 2 DESC LIMIT 3; DEBUG: push down of limit count: 20 -DEBUG: generating subplan 1_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 - event_type | count -------------+------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 + event_type | count +--------------------------------------------------------------------- 6 | 1 5 | 3 4 | 6 @@ -33,10 +33,10 @@ FROM SELECT user_id, value_1, value_2 FROM users_table OFFSET 0 ) as foo(x, y) ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5; -DEBUG: generating subplan 3_1 for subquery SELECT user_id, value_1, value_2 FROM public.users_table OFFSET 0 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT x, y, value_2 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer, value_2 integer)) foo(x, y, value_2) ORDER BY x DESC, y DESC, value_2 DESC LIMIT 5 - x | y | value_2 ----+---+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_1, value_2 FROM 
public.users_table OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y, value_2 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer, value_2 integer)) foo(x, y, value_2) ORDER BY x DESC, y DESC, value_2 DESC LIMIT 5 + x | y | value_2 +--------------------------------------------------------------------- 6 | 5 | 2 6 | 5 | 0 6 | 3 | 2 @@ -68,15 +68,15 @@ FROM WHERE foo.avg != bar.cnt_1 AND baz.cnt_2 = events_table.event_type ORDER BY 1 DESC; DEBUG: push down of limit count: 3 -DEBUG: generating subplan 5_1 for subquery SELECT avg(DISTINCT user_id) AS avg FROM public.users_table ORDER BY (avg(DISTINCT user_id)) DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT avg(DISTINCT user_id) AS avg FROM public.users_table ORDER BY (avg(DISTINCT user_id)) DESC LIMIT 3 DEBUG: push down of limit count: 3 -DEBUG: generating subplan 5_2 for subquery SELECT count(DISTINCT user_id) AS cnt_1 FROM public.users_table ORDER BY (count(DISTINCT user_id)) DESC LIMIT 3 -DEBUG: generating subplan 5_3 for subquery SELECT count(DISTINCT value_2) AS cnt_2 FROM public.users_table ORDER BY (count(DISTINCT value_2)) DESC LIMIT 4 +DEBUG: generating subplan XXX_2 for subquery SELECT count(DISTINCT user_id) AS cnt_1 FROM public.users_table ORDER BY (count(DISTINCT user_id)) DESC LIMIT 3 +DEBUG: generating subplan XXX_3 for subquery SELECT count(DISTINCT value_2) AS cnt_2 FROM public.users_table ORDER BY (count(DISTINCT value_2)) DESC LIMIT 4 DEBUG: push down of limit count: 4 -DEBUG: generating subplan 5_4 for subquery SELECT user_id, sum(DISTINCT value_2) AS sum FROM public.users_table GROUP BY user_id ORDER BY user_id DESC LIMIT 4 -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, bat.sum FROM (SELECT intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 bigint)) bar, (SELECT intermediate_result.cnt_2 FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 bigint)) baz, (SELECT intermediate_result.user_id, intermediate_result.sum FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, sum bigint)) bat, public.events_table WHERE ((foo.avg OPERATOR(pg_catalog.<>) (bar.cnt_1)::numeric) AND (baz.cnt_2 OPERATOR(pg_catalog.=) events_table.event_type)) ORDER BY foo.avg DESC - avg | cnt_1 | cnt_2 | sum ---------------------+-------+-------+----- +DEBUG: generating subplan XXX_4 for subquery SELECT user_id, sum(DISTINCT value_2) AS sum FROM public.users_table GROUP BY user_id ORDER BY user_id DESC LIMIT 4 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, bat.sum FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 bigint)) bar, (SELECT intermediate_result.cnt_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) 
intermediate_result(cnt_2 bigint)) baz, (SELECT intermediate_result.user_id, intermediate_result.sum FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, sum bigint)) bat, public.events_table WHERE ((foo.avg OPERATOR(pg_catalog.<>) (bar.cnt_1)::numeric) AND (baz.cnt_2 OPERATOR(pg_catalog.=) events_table.event_type)) ORDER BY foo.avg DESC + avg | cnt_1 | cnt_2 | sum +--------------------------------------------------------------------- 3.5000000000000000 | 6 | 6 | 10 (1 row) @@ -112,14 +112,14 @@ FROM ) as baz ORDER BY 1 DESC; DEBUG: push down of limit count: 3 -DEBUG: generating subplan 10_1 for subquery SELECT (min(user_id) OPERATOR(pg_catalog.*) 2), (max(user_id) OPERATOR(pg_catalog./) 2), sum(user_id) AS sum, (count(user_id))::double precision AS count, (avg(user_id))::bigint AS avg FROM public.users_table ORDER BY (min(user_id) OPERATOR(pg_catalog.*) 2) DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT (min(user_id) OPERATOR(pg_catalog.*) 2), (max(user_id) OPERATOR(pg_catalog./) 2), sum(user_id) AS sum, (count(user_id))::double precision AS count, (avg(user_id))::bigint AS avg FROM public.users_table ORDER BY (min(user_id) OPERATOR(pg_catalog.*) 2) DESC LIMIT 3 DEBUG: push down of limit count: 3 -DEBUG: generating subplan 10_2 for subquery SELECT (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision), (max(value_3) OPERATOR(pg_catalog./) (2)::double precision), sum(value_3) AS sum, count(value_3) AS count, avg(value_3) AS avg FROM public.users_table ORDER BY (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision) DESC LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision), (max(value_3) OPERATOR(pg_catalog./) (2)::double precision), sum(value_3) AS sum, count(value_3) AS count, avg(value_3) AS avg FROM public.users_table ORDER BY (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision) DESC LIMIT 3 DEBUG: push down of limit count: 3 -DEBUG: generating subplan 10_3 for subquery SELECT min("time") AS min, max("time") AS max, count("time") AS count, count(*) FILTER (WHERE (user_id OPERATOR(pg_catalog.=) 3)) AS cnt_with_filter, count(*) FILTER (WHERE ((user_id)::text OPERATOR(pg_catalog.~~) '%3%'::text)) AS cnt_with_filter_2 FROM public.users_table ORDER BY (min("time")) DESC LIMIT 3 -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1" AS "?column?", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1" AS "?column?", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC - ?column? | ?column? | sum | count | avg | ?column? | ?column? | sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2 -----------+----------+-----+-------+-----+----------+----------+-----+-------+-----------------+---------------------------------+---------------------------------+-------+-----------------+------------------- +DEBUG: generating subplan XXX_3 for subquery SELECT min("time") AS min, max("time") AS max, count("time") AS count, count(*) FILTER (WHERE (user_id OPERATOR(pg_catalog.=) 3)) AS cnt_with_filter, count(*) FILTER (WHERE ((user_id)::text OPERATOR(pg_catalog.~~) '%3%'::text)) AS cnt_with_filter_2 FROM public.users_table ORDER BY (min("time")) DESC LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1" AS "?column?", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1" AS "?column?", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC + ?column? | ?column? | sum | count | avg | ?column? | ?column? 
| sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2 +--------------------------------------------------------------------- 2 | 3 | 376 | 101 | 4 | 0 | 2.5 | 273 | 101 | 2.7029702970297 | Wed Nov 22 18:19:49.944985 2017 | Thu Nov 23 17:30:34.635085 2017 | 101 | 17 | 17 (1 row) @@ -165,16 +165,16 @@ FROM WHERE foo.avg != bar.cnt_1 AND baz.cnt_2 != events_table.event_type ORDER BY 1 DESC; DEBUG: push down of limit count: 3 -DEBUG: generating subplan 14_1 for subquery SELECT avg(((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) ((value_1)::numeric OPERATOR(pg_catalog.+) 0.1)))) AS avg FROM public.users_table ORDER BY (avg(((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) ((value_1)::numeric OPERATOR(pg_catalog.+) 0.1))))) DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT avg(((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) ((value_1)::numeric OPERATOR(pg_catalog.+) 0.1)))) AS avg FROM public.users_table ORDER BY (avg(((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) ((value_1)::numeric OPERATOR(pg_catalog.+) 0.1))))) DESC LIMIT 3 DEBUG: push down of limit count: 3 -DEBUG: generating subplan 14_2 for subquery SELECT sum(((((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) (((value_1 OPERATOR(pg_catalog.+) value_2))::numeric OPERATOR(pg_catalog.+) 0.1))))::double precision OPERATOR(pg_catalog.*) value_3)) AS cnt_1 FROM public.users_table ORDER BY (sum(((((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) (((value_1 OPERATOR(pg_catalog.+) value_2))::numeric OPERATOR(pg_catalog.+) 0.1))))::double precision OPERATOR(pg_catalog.*) value_3))) DESC LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT sum(((((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) (((value_1 OPERATOR(pg_catalog.+) value_2))::numeric OPERATOR(pg_catalog.+) 0.1))))::double precision OPERATOR(pg_catalog.*) value_3)) AS cnt_1 FROM public.users_table ORDER BY (sum(((((user_id)::numeric OPERATOR(pg_catalog.*) (5.0 OPERATOR(pg_catalog./) (((value_1 OPERATOR(pg_catalog.+) value_2))::numeric OPERATOR(pg_catalog.+) 0.1))))::double precision OPERATOR(pg_catalog.*) value_3))) DESC LIMIT 3 DEBUG: push down of limit count: 4 -DEBUG: generating subplan 14_3 for subquery SELECT avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 4) THEN value_1 ELSE NULL::integer END) AS cnt_2, avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 500) THEN value_1 ELSE NULL::integer END) AS cnt_3, sum(CASE WHEN ((value_1 OPERATOR(pg_catalog.=) 1) OR (value_2 OPERATOR(pg_catalog.=) 1)) THEN 1 ELSE 0 END) AS sum_1, date_part('year'::text, max("time")) AS l_year, strpos((max(user_id))::text, '1'::text) AS pos FROM public.users_table ORDER BY (avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 4) THEN value_1 ELSE NULL::integer END)) DESC LIMIT 4 +DEBUG: generating subplan XXX_3 for subquery SELECT avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 4) THEN value_1 ELSE NULL::integer END) AS cnt_2, avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 500) THEN value_1 ELSE NULL::integer END) AS cnt_3, sum(CASE WHEN ((value_1 OPERATOR(pg_catalog.=) 1) OR (value_2 OPERATOR(pg_catalog.=) 1)) THEN 1 ELSE 0 END) AS sum_1, date_part('year'::text, max("time")) AS l_year, strpos((max(user_id))::text, '1'::text) AS pos FROM public.users_table ORDER BY (avg(CASE WHEN (user_id OPERATOR(pg_catalog.>) 4) THEN value_1 ELSE NULL::integer END)) DESC LIMIT 4 DEBUG: push down of limit count: 25 -DEBUG: generating 
subplan 14_4 for subquery SELECT COALESCE(value_3, (20)::double precision) AS count_pay FROM public.users_table ORDER BY COALESCE(value_3, (20)::double precision) OFFSET 20 LIMIT 5 -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, baz.cnt_3, baz.sum_1, baz.l_year, baz.pos, tar.count_pay FROM (SELECT intermediate_result.avg FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 double precision)) bar, (SELECT intermediate_result.cnt_2, intermediate_result.cnt_3, intermediate_result.sum_1, intermediate_result.l_year, intermediate_result.pos FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 numeric, cnt_3 numeric, sum_1 bigint, l_year double precision, pos integer)) baz, (SELECT intermediate_result.count_pay FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(count_pay double precision)) tar, public.events_table WHERE (((foo.avg)::double precision OPERATOR(pg_catalog.<>) bar.cnt_1) AND (baz.cnt_2 OPERATOR(pg_catalog.<>) (events_table.event_type)::numeric)) ORDER BY foo.avg DESC - avg | cnt_1 | cnt_2 | cnt_3 | sum_1 | l_year | pos | count_pay --------------------------+------------------+--------------------+-------+-------+--------+-----+----------- +DEBUG: generating subplan XXX_4 for subquery SELECT COALESCE(value_3, (20)::double precision) AS count_pay FROM public.users_table ORDER BY COALESCE(value_3, (20)::double precision) OFFSET 20 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, baz.cnt_3, baz.sum_1, baz.l_year, baz.pos, tar.count_pay FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 double precision)) bar, (SELECT intermediate_result.cnt_2, intermediate_result.cnt_3, intermediate_result.sum_1, intermediate_result.l_year, intermediate_result.pos FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 numeric, cnt_3 numeric, sum_1 bigint, l_year double precision, pos integer)) baz, (SELECT intermediate_result.count_pay FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count_pay double precision)) tar, public.events_table WHERE (((foo.avg)::double precision OPERATOR(pg_catalog.<>) bar.cnt_1) AND (baz.cnt_2 OPERATOR(pg_catalog.<>) (events_table.event_type)::numeric)) ORDER BY foo.avg DESC + avg | cnt_1 | cnt_2 | cnt_3 | sum_1 | l_year | pos | count_pay +--------------------------------------------------------------------- 30.14666771571734992301 | 3308.14619815793 | 2.5000000000000000 | | 31 | 2017 | 0 | 1 (1 row) @@ -192,11 +192,11 @@ FROM WHERE foo.avg = bar.avg2 ORDER BY 1 DESC, 2 DESC LIMIT 3; -DEBUG: generating subplan 19_1 for subquery SELECT avg(value_3) AS avg FROM public.users_table GROUP BY value_1, value_2 -DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.avg2 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(avg 
double precision)) foo, (SELECT avg(users_table.value_3) AS avg2 FROM public.users_table GROUP BY users_table.value_1, users_table.value_2, users_table.user_id) bar WHERE (foo.avg OPERATOR(pg_catalog.=) bar.avg2) ORDER BY foo.avg DESC, bar.avg2 DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT avg(value_3) AS avg FROM public.users_table GROUP BY value_1, value_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.avg2 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(avg double precision)) foo, (SELECT avg(users_table.value_3) AS avg2 FROM public.users_table GROUP BY users_table.value_1, users_table.value_2, users_table.user_id) bar WHERE (foo.avg OPERATOR(pg_catalog.=) bar.avg2) ORDER BY foo.avg DESC, bar.avg2 DESC LIMIT 3 DEBUG: push down of limit count: 3 - avg | avg2 ------+------ + avg | avg2 +--------------------------------------------------------------------- 5 | 5 4 | 4 3.5 | 3.5 @@ -250,12 +250,12 @@ FROM ( ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; DEBUG: push down of limit count: 3 -DEBUG: generating subplan 21_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY user_id HAVING (count(DISTINCT value_1) OPERATOR(pg_catalog.>) 2) ORDER BY user_id DESC LIMIT 3 -DEBUG: generating subplan 21_2 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (count(DISTINCT value_1) OPERATOR(pg_catalog.>) 2) ORDER BY value_2 DESC LIMIT 3 -DEBUG: generating subplan 21_3 for subquery SELECT avg(user_id) AS avg FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (sum(value_1) OPERATOR(pg_catalog.>) 10) ORDER BY ((sum(value_3) OPERATOR(pg_catalog.-) (avg(value_1))::double precision) OPERATOR(pg_catalog.-) (COALESCE((array_upper(ARRAY[max(user_id)], 1) OPERATOR(pg_catalog.*) 5), 0))::double precision) DESC LIMIT 3 -DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT a.user_id, b.value_2, c.avg FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a, (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) b, (SELECT intermediate_result.avg FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) c WHERE (b.value_2 OPERATOR(pg_catalog.<>) a.user_id) ORDER BY c.avg DESC, b.value_2 DESC, a.user_id DESC LIMIT 5 - user_id | value_2 | avg ----------+---------+-------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY user_id HAVING (count(DISTINCT value_1) OPERATOR(pg_catalog.>) 2) ORDER BY user_id DESC LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (count(DISTINCT value_1) OPERATOR(pg_catalog.>) 2) ORDER BY value_2 DESC LIMIT 3 +DEBUG: generating subplan XXX_3 for subquery SELECT avg(user_id) AS avg FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (sum(value_1) OPERATOR(pg_catalog.>) 10) ORDER BY ((sum(value_3) OPERATOR(pg_catalog.-) (avg(value_1))::double precision) OPERATOR(pg_catalog.-) 
(COALESCE((array_upper(ARRAY[max(user_id)], 1) OPERATOR(pg_catalog.*) 5), 0))::double precision) DESC LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a.user_id, b.value_2, c.avg FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) b, (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) c WHERE (b.value_2 OPERATOR(pg_catalog.<>) a.user_id) ORDER BY c.avg DESC, b.value_2 DESC, a.user_id DESC LIMIT 5 + user_id | value_2 | avg +--------------------------------------------------------------------- 4 | 5 | 4.1666666666666667 3 | 5 | 4.1666666666666667 5 | 4 | 4.1666666666666667 @@ -288,11 +288,11 @@ FROM WHERE foo.user_id > bar.user_id ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 25_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND false AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.>) bar.user_id) ORDER BY bar.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND false AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.>) bar.user_id) ORDER BY bar.user_id DESC + user_id +--------------------------------------------------------------------- (0 rows) -- window functions tests, both is recursively planned @@ -334,11 +334,11 @@ SELECT * FROM ) bar WHERE foo.user_id = bar.user_id ORDER BY foo.rnk DESC, 
foo.time DESC, bar.time LIMIT 5; DEBUG: push down of limit count: 4 -DEBUG: generating subplan 28_1 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rnk FROM (SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4, rank() OVER my_win AS rnk FROM public.events_table WINDOW my_win AS (PARTITION BY events_table.user_id ORDER BY events_table."time" DESC) ORDER BY (rank() OVER my_win) DESC) foo_inner ORDER BY user_id DESC LIMIT 4 -DEBUG: generating subplan 28_2 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rank() OVER my_win AS rnk FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 3) WINDOW my_win AS (PARTITION BY event_type ORDER BY "time" DESC) -DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT foo.user_id, foo."time", foo.rnk, bar.user_id, bar."time", bar.rnk FROM (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) foo, (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY foo.rnk DESC, foo."time" DESC, bar."time" LIMIT 5 - user_id | time | rnk | user_id | time | rnk ----------+------+-----+---------+------+----- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rnk FROM (SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4, rank() OVER my_win AS rnk FROM public.events_table WINDOW my_win AS (PARTITION BY events_table.user_id ORDER BY events_table."time" DESC) ORDER BY (rank() OVER my_win) DESC) foo_inner ORDER BY user_id DESC LIMIT 4 +DEBUG: generating subplan XXX_2 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rank() OVER my_win AS rnk FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 3) WINDOW my_win AS (PARTITION BY event_type ORDER BY "time" DESC) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id, foo."time", foo.rnk, bar.user_id, bar."time", bar.rnk FROM (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" 
timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) foo, (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY foo.rnk DESC, foo."time" DESC, bar."time" LIMIT 5 + user_id | time | rnk | user_id | time | rnk +--------------------------------------------------------------------- (0 rows) -- cursor test @@ -355,29 +355,29 @@ BEGIN; ORDER BY 1 DESC, 2 DESC LIMIT 3; DEBUG: push down of limit count: 20 -DEBUG: generating subplan 31_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 FETCH 1 FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- 6 | 1 (1 row) FETCH 1 FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- 5 | 3 (1 row) FETCH 1 FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- 4 | 6 (1 row) FETCH 1 FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- (0 rows) COMMIT; @@ -395,19 +395,19 @@ BEGIN; ORDER BY 1 DESC, 2 DESC LIMIT 3; DEBUG: push down of limit count: 20 -DEBUG: generating subplan 33_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 -DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, 
(count(DISTINCT value_2)) DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 FETCH ALL FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- 6 | 1 5 | 3 4 | 6 (3 rows) FETCH ALL FROM recursive_subquery; - event_type | count -------------+------- + event_type | count +--------------------------------------------------------------------- (0 rows) COMMIT; diff --git a/src/test/regress/expected/subquery_executors.out b/src/test/regress/expected/subquery_executors.out index 43ab91eb6..560c6aee1 100644 --- a/src/test/regress/expected/subquery_executors.out +++ b/src/test/regress/expected/subquery_executors.out @@ -6,86 +6,86 @@ SET search_path TO subquery_executor, public; CREATE TABLE users_table_local AS SELECT * FROM users_table; SET client_min_messages TO DEBUG1; -- subquery with router planner -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_2 = bar.user_id; -DEBUG: generating subplan 2_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 -DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_2 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 0 (1 row) -- subquery with router but not logical plannable -- bar is recursively planned SELECT - count(*) + count(*) FROM ( SELECT user_id, sum(value_2) over (partition by user_id) AS counter FROM users_table WHERE user_id = 15 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.counter = bar.user_id; -DEBUG: generating subplan 4_1 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) 15)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) bar WHERE (foo.counter OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.counter = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) 15)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.counter OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 0 (1 row) -- subquery with real-time query -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id != 15 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_2 = bar.user_id; -DEBUG: generating subplan 6_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.<>) 15) OFFSET 0 -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_2 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.<>) 15) OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 1612 (1 row) -- subquery with repartition query SET citus.enable_repartition_joins to ON; -SELECT - count(*) +SELECT + count(*) FROM ( SELECT DISTINCT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND users_table.user_id < 2 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_2 = bar.user_id; -DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_2 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT 
intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 58 (1 row) -- mixed of all executors (including local execution) -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table OFFSET 0 ) as bar, @@ -95,52 +95,52 @@ FROM ( SELECT user_id FROM users_table_local WHERE user_id = 2 ) baw -WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; -DEBUG: generating subplan 10_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 -DEBUG: generating subplan 10_2 for subquery SELECT user_id FROM public.users_table OFFSET 0 -DEBUG: generating subplan 10_3 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) -DEBUG: generating subplan 10_4 for subquery SELECT user_id FROM subquery_executor.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) - count -------- +WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table OFFSET 0 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) +DEBUG: generating subplan XXX_4 for subquery SELECT user_id FROM subquery_executor.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT 
intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) + count +--------------------------------------------------------------------- 0 (1 row) SET citus.enable_repartition_joins to OFF; --- final query is router -SELECT - count(*) +-- final query is router +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 1 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table WHERE user_id = 2 OFFSET 0 ) as bar -WHERE foo.value_2 = bar.user_id; -DEBUG: generating subplan 14_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0 -DEBUG: generating subplan 14_2 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 2) OFFSET 0 -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_2 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0 +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 2) OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 18 (1 row) -- final query is real-time -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 1 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table WHERE user_id != 2 ) as bar -WHERE foo.value_2 = bar.user_id; -DEBUG: generating subplan 17_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0 -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.<>) 2)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_2 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM 
public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.<>) 2)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 103 (1 row) diff --git a/src/test/regress/expected/subquery_in_where.out b/src/test/regress/expected/subquery_in_where.out index 8a66110a0..96cf8e7c9 100644 --- a/src/test/regress/expected/subquery_in_where.out +++ b/src/test/regress/expected/subquery_in_where.out @@ -14,11 +14,11 @@ SELECT Count(*) FROM event_id WHERE events_user_id IN (SELECT user_id FROM users_table); -DEBUG: generating subplan 1_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table -DEBUG: generating subplan 1_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - count -------- +DEBUG: generating subplan XXX_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + count +--------------------------------------------------------------------- 101 (1 row) @@ -33,8 +33,8 @@ FROM event_id WHERE events_user_id IN (SELECT user_id FROM users_table WHERE users_table.time = events_time); -DEBUG: generating subplan 4_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table WHERE (users_table."time" OPERATOR(pg_catalog.=) event_id.events_time))) +DEBUG: generating subplan XXX_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) 
intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table WHERE (users_table."time" OPERATOR(pg_catalog.=) event_id.events_time))) ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs are not allowed in the FROM clause when the query has subqueries in the WHERE clause and it references a column from another query -- Recurring tuples as empty join tree @@ -43,11 +43,11 @@ FROM (SELECT 1 AS id, 2 AS value_1, 3 AS value_3 UNION ALL SELECT 2 as id, 3 as value_1, 4 as value_3) AS tt1 WHERE id IN (SELECT user_id FROM events_table); -DEBUG: generating subplan 6_1 for subquery SELECT 1 AS id, 2 AS value_1, 3 AS value_3 UNION ALL SELECT 2 AS id, 3 AS value_1, 4 AS value_3 -DEBUG: generating subplan 6_2 for subquery SELECT user_id FROM public.events_table -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT intermediate_result.id, intermediate_result.value_1, intermediate_result.value_3 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer, value_3 integer)) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - id | value_1 | value_3 -----+---------+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT 1 AS id, 2 AS value_1, 3 AS value_3 UNION ALL SELECT 2 AS id, 3 AS value_1, 4 AS value_3 +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT intermediate_result.id, intermediate_result.value_1, intermediate_result.value_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer, value_3 integer)) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + id | value_1 | value_3 +--------------------------------------------------------------------- 1 | 2 | 3 2 | 3 | 4 (2 rows) @@ -72,15 +72,15 @@ WHERE events_user_id IN ( ORDER BY 1 limit 10)); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 8_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 8_2 for subquery SELECT value_1 FROM public.users_table ORDER BY value_1 LIMIT 10 -DEBUG: generating subplan 8_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer) -DEBUG: generating subplan 8_4 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table -DEBUG: generating subplan 8_5 for subquery SELECT events_user_id, events_time, event_type FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM 
read_intermediate_result('8_4'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id, events_time, event_type LIMIT 10 -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('8_5'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT value_1 FROM public.users_table ORDER BY value_1 LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer) +DEBUG: generating subplan XXX_4 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table +DEBUG: generating subplan XXX_5 for subquery SELECT events_user_id, events_time, event_type FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id, events_time, event_type LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -129,16 +129,16 @@ IN LIMIT 10 ); -DEBUG: generating subplan 14_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table -DEBUG: generating subplan 14_2 for subquery SELECT events_user_id FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id LIMIT 10 +DEBUG: generating subplan XXX_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table +DEBUG: generating subplan XXX_2 for subquery SELECT events_user_id FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) 
intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 14_3 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 14_4 for subquery SELECT value_1 FROM public.users_table ORDER BY value_1 LIMIT 10 -DEBUG: generating subplan 14_5 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer) -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.events_user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer))) - user_id ---------- +DEBUG: generating subplan XXX_4 for subquery SELECT value_1 FROM public.users_table ORDER BY value_1 LIMIT 10 +DEBUG: generating subplan XXX_5 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.events_user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer))) + user_id +--------------------------------------------------------------------- 1 1 1 @@ -172,11 +172,11 @@ WHERE user_id ); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 20_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time" LIMIT 10 -DEBUG: generating subplan 20_2 for subquery SELECT max((abs((user_id OPERATOR(pg_catalog.*) 1)) OPERATOR(pg_catalog.+) mod(user_id, 3))) AS val_1 FROM public.users_table GROUP BY user_id -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.val_1 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(val_1 integer))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time" LIMIT 10 
+DEBUG: generating subplan XXX_2 for subquery SELECT max((abs((user_id OPERATOR(pg_catalog.*) 1)) OPERATOR(pg_catalog.+) mod(user_id, 3))) AS val_1 FROM public.users_table GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.val_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(val_1 integer))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -202,11 +202,11 @@ IN ( user_id ); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 23_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table LIMIT 10 -DEBUG: generating subplan 23_2 for subquery SELECT DISTINCT user_id FROM public.users_table GROUP BY user_id -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT user_id FROM public.users_table GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -244,12 +244,12 @@ AND user_id ); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 26_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time", event_type LIMIT 10 -DEBUG: generating subplan 26_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id -DEBUG: generating subplan 26_3 for subquery SELECT max(user_id) AS max FROM public.users_table GROUP BY user_id -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without 
time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time", event_type LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id +DEBUG: generating subplan XXX_3 for subquery SELECT max(user_id) AS max FROM public.users_table GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -287,12 +287,12 @@ AND user_id ); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 30_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time", event_type LIMIT 10 -DEBUG: generating subplan 30_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id -DEBUG: generating subplan 30_3 for subquery SELECT max(value_2) AS max FROM public.users_table GROUP BY user_id -DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('30_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('30_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time", event_type LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id +DEBUG: generating subplan XXX_3 for subquery SELECT max(value_2) AS max FROM public.users_table GROUP BY user_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, 
intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) + count +--------------------------------------------------------------------- 10 (1 row) @@ -324,14 +324,14 @@ SELECT COUNT(*) FROM cte; -DEBUG: generating subplan 34_1 for CTE cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM public.users_table ORDER BY users_table.user_id, users_table.value_2 DESC LIMIT 10) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.value_2 FROM public.events_table)) +DEBUG: generating subplan XXX_1 for CTE cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM public.users_table ORDER BY users_table.user_id, users_table.value_2 DESC LIMIT 10) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.value_2 FROM public.events_table)) DEBUG: push down of limit count: 10 -DEBUG: generating subplan 35_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id, value_2 DESC LIMIT 10 -DEBUG: generating subplan 35_2 for subquery SELECT value_2 FROM public.events_table -DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) -DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id, value_2 DESC LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT value_2 FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 
FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte + count +--------------------------------------------------------------------- 10 (1 row) @@ -362,12 +362,12 @@ FROM ) ) as sub_table_2; DEBUG: push down of limit count: 10 -DEBUG: generating subplan 38_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id, value_2 DESC LIMIT 10 -DEBUG: generating subplan 38_2 for subquery SELECT value_2 FROM public.events_table -DEBUG: generating subplan 38_3 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_1 WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_2 - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id, value_2 DESC LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT value_2 FROM public.events_table +DEBUG: generating subplan XXX_3 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_1 
WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_2 + count +--------------------------------------------------------------------- 10 (1 row) @@ -415,15 +415,15 @@ WHERE GROUP BY user_id); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 42_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 42_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 -DEBUG: generating subplan 42_3 for subquery SELECT value_2 FROM public.events_table -DEBUG: generating subplan 42_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) -DEBUG: generating subplan 42_5 for subquery SELECT min(user_id) AS min FROM public.events_table GROUP BY user_id -DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('42_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (user_id OPERATOR(pg_catalog.>) ANY (SELECT intermediate_result.min FROM read_intermediate_result('42_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) - sum ------ +DEBUG: generating subplan XXX_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.events_table +DEBUG: generating subplan XXX_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) +DEBUG: generating subplan XXX_5 for subquery SELECT min(user_id) AS min FROM public.events_table GROUP BY user_id +DEBUG: 
Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (user_id OPERATOR(pg_catalog.>) ANY (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) + sum +--------------------------------------------------------------------- 18 (1 row) @@ -468,15 +468,15 @@ WHERE EXISTS WHERE events_table.value_2 = events_table.user_id); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 48_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 48_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 -DEBUG: generating subplan 48_3 for subquery SELECT value_2 FROM public.events_table -DEBUG: generating subplan 48_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) -DEBUG: generating subplan 48_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) user_id) -DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('48_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('48_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer))) - sum ------ +DEBUG: generating subplan XXX_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.events_table +DEBUG: generating subplan XXX_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) +DEBUG: generating subplan XXX_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))) + sum +--------------------------------------------------------------------- 67 (1 row) @@ -522,15 +522,15 @@ WHERE NOT EXISTS WHERE events_table.value_2 = events_table.user_id + 6); DEBUG: push down of limit count: 10 -DEBUG: generating subplan 54_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 54_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 -DEBUG: generating subplan 54_3 for subquery SELECT value_2 FROM public.events_table -DEBUG: generating subplan 54_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('54_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('54_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) -DEBUG: generating subplan 54_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) (user_id OPERATOR(pg_catalog.+) 6)) -DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('54_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('54_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer)))) - sum ------ +DEBUG: generating subplan XXX_2 for subquery SELECT user_id AS user_id_2 FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.events_table +DEBUG: generating subplan XXX_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) +DEBUG: generating subplan XXX_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) (user_id OPERATOR(pg_catalog.+) 6)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))) + sum +--------------------------------------------------------------------- 67 (1 row) @@ -553,12 +553,12 @@ WHERE row(user_id, value_1) = min(user_id) + 1, min(user_id) + 1 FROM events_table); -DEBUG: generating subplan 60_1 for subquery SELECT (min(user_id) OPERATOR(pg_catalog.+) 1), (min(user_id) OPERATOR(pg_catalog.+) 1) FROM public.events_table +DEBUG: generating subplan XXX_1 for subquery SELECT (min(user_id) OPERATOR(pg_catalog.+) 1), (min(user_id) OPERATOR(pg_catalog.+) 1) FROM public.events_table DEBUG: push down of limit count: 10 -DEBUG: generating subplan 60_2 for subquery SELECT user_id, value_1 FROM public.users_table ORDER BY user_id, value_1 LIMIT 10 -DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('60_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))) - user_id | value_1 ----------+--------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id, value_1 FROM public.users_table ORDER BY user_id, value_1 LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer))) + user_id | value_1 +--------------------------------------------------------------------- (0 rows) -- Recursively plan subquery in WHERE clause when the FROM clause has a subquery @@ -581,10 +581,10 @@ IN ) ORDER BY generate_series ASC; -DEBUG: generating subplan 63_1 for subquery SELECT value_2 FROM public.events_table -DEBUG: Plan 63 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('63_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY generate_series - generate_series ------------------ +DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY generate_series + generate_series +--------------------------------------------------------------------- 1 2 3 @@ -619,10 +619,10 @@ IN ) ORDER BY generate_series ASC; -DEBUG: generating subplan 65_1 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT generate_series.generate_series FROM generate_series(1, 3) generate_series(generate_series))) -DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY generate_series - generate_series ------------------ +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT generate_series.generate_series FROM generate_series(1, 3) generate_series(generate_series))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY generate_series + generate_series +--------------------------------------------------------------------- 1 2 3 @@ -646,11 +646,11 @@ IN user_id FROM users_table); -DEBUG: generating subplan 67_1 for subquery SELECT id, value_1 FROM subquery_in_where.local_table -DEBUG: generating subplan 67_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 67 query after replacing subqueries and CTEs: SELECT id, value_1 FROM (SELECT intermediate_result.id, intermediate_result.value_1 FROM read_intermediate_result('67_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer)) sub_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('67_2'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer))) - id | value_1 -----+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT id, value_1 FROM subquery_in_where.local_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id, value_1 FROM (SELECT intermediate_result.id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer)) sub_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) + id | value_1 +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -674,12 +674,12 @@ IN id FROM local_table); -DEBUG: generating subplan 69_1 for subquery SELECT id FROM subquery_in_where.local_table +DEBUG: generating subplan XXX_1 for subquery SELECT id FROM subquery_in_where.local_table DEBUG: push down of limit count: 10 -DEBUG: generating subplan 69_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id LIMIT 10 -DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('69_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer))) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer))) + count +--------------------------------------------------------------------- 10 (1 row) diff --git a/src/test/regress/expected/subquery_local_tables.out b/src/test/regress/expected/subquery_local_tables.out index 56c0b6748..fa59df7d4 100644 --- a/src/test/regress/expected/subquery_local_tables.out +++ b/src/test/regress/expected/subquery_local_tables.out @@ -11,32 +11,32 @@ SET client_min_messages TO DEBUG1; SELECT foo.user_id FROM - (SELECT - DISTINCT users_table_local.user_id - FROM - users_table_local, events_table_local - WHERE - users_table_local.user_id = events_table_local.user_id AND + (SELECT + DISTINCT users_table_local.user_id + FROM + users_table_local, events_table_local + WHERE + users_table_local.user_id = events_table_local.user_id AND 
event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ORDER BY 1 DESC LIMIT 5 ) as bar WHERE bar.user_id = foo.user_id ORDER BY 1 DESC; -DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -48,29 +48,29 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.user_id FRO SELECT foo.user_id FROM - (SELECT - DISTINCT users_table_local.user_id - FROM - users_table_local, events_table_local - WHERE - users_table_local.user_id = events_table_local.user_id AND + (SELECT + DISTINCT users_table_local.user_id + FROM + users_table_local, events_table_local + WHERE + users_table_local.user_id = events_table_local.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = 
events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ) as bar WHERE bar.user_id = foo.user_id ORDER BY 1 DESC; -DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5 -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -81,14 +81,14 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.user_id FRO -- subqueries in WHERE could be replaced even if they are on the local tables SELECT DISTINCT user_id FROM users_table -WHERE - user_id IN (SELECT DISTINCT value_2 FROM users_table_local WHERE value_1 = 1) +WHERE + user_id IN (SELECT DISTINCT value_2 FROM users_table_local WHERE value_1 = 1) ORDER BY 1 LIMIT 5; -DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT value_2 FROM subquery_local_tables.users_table_local WHERE (value_1 OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_2 FROM subquery_local_tables.users_table_local WHERE (value_1 OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) 
intermediate_result(value_2 integer))) ORDER BY user_id LIMIT 5 DEBUG: push down of limit count: 5 - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -96,31 +96,31 @@ DEBUG: push down of limit count: 5 5 (5 rows) --- subquery in FROM -> FROM -> FROM should be replaced if +-- subquery in FROM -> FROM -> FROM should be replaced if -- it contains onle local tables -SELECT - DISTINCT user_id -FROM +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT event_type, user_id FROM - (SELECT event_type, users_table.user_id FROM users_table, + (SELECT event_type, users_table.user_id FROM users_table, (SELECT user_id, event_type FROM events_table_local WHERE value_2 < 3 OFFSET 3) as foo WHERE foo.user_id = users_table.user_id ) bar ) as baz WHERE baz.user_id = users_table.user_id ) as sub1 - ORDER BY 1 DESC + ORDER BY 1 DESC LIMIT 3; -DEBUG: generating subplan 7_1 for subquery SELECT user_id, event_type FROM subquery_local_tables.events_table_local WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, event_type FROM subquery_local_tables.events_table_local WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id ---------- + user_id +--------------------------------------------------------------------- 6 5 4 @@ -133,18 +133,18 @@ SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( - SELECT + SELECT u.user_id, e.event_type::text AS event, e.time - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -154,20 +154,20 @@ FROM ( GROUP BY user_id ) q ORDER BY 2 DESC, 1; -DEBUG: generating 
subplan 8_1 for subquery SELECT user_id FROM subquery_local_tables.events_table_local WHERE ((event_type OPERATOR(pg_catalog.>) 1) AND (event_type OPERATOR(pg_catalog.<=) 3) AND (value_3 OPERATOR(pg_catalog.>) (1)::double precision)) +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM subquery_local_tables.events_table_local WHERE ((event_type OPERATOR(pg_catalog.>) 1) AND (event_type OPERATOR(pg_catalog.<=) 3) AND (value_3 OPERATOR(pg_catalog.>) (1)::double precision)) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 8_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id - user_id | array_length ----------+-------------- +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id + user_id | array_length +--------------------------------------------------------------------- 5 | 364 (1 row) -- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables -SELECT - user_id -FROM - users_table +SELECT + user_id +FROM + users_table WHERE user_id IN ( @@ -233,10 +233,10 @@ WHERE GROUP BY user_id HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; -DEBUG: generating subplan 10_1 for subquery SELECT user_id, 
count(*) AS count_pay FROM subquery_local_tables.users_table_local WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id, count(*) AS count_pay FROM subquery_local_tables.users_table_local WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id + user_id +--------------------------------------------------------------------- 2 3 (2 rows) diff --git a/src/test/regress/expected/subquery_partitioning.out b/src/test/regress/expected/subquery_partitioning.out index 856646b0c..f8c7ceb1d 100644 --- a/src/test/regress/expected/subquery_partitioning.out +++ b/src/test/regress/expected/subquery_partitioning.out @@ -19,9 +19,9 @@ SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SET client_min_messages TO DEBUG1; @@ -37,10 +37,10 @@ FROM ) as foo ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo ORDER BY id DESC - id ----- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo ORDER BY id DESC + id +--------------------------------------------------------------------- 4 3 2 @@ -66,12 +66,12 @@ FROM WHERE foo.id = date_part('day', bar.time) ORDER BY 2 DESC, 1; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 5_2 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test LIMIT 5 -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.id, bar."time" FROM (SELECT intermediate_result.id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo, (SELECT intermediate_result."time" FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) bar WHERE ((foo.id)::double precision OPERATOR(pg_catalog.=) date_part('day'::text, bar."time")) ORDER BY bar."time" DESC, foo.id - id | time -----+------------ +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.id, bar."time" FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo, (SELECT intermediate_result."time" FROM read_intermediate_result('XXX_2'::text, 
'binary'::citus_copy_format) intermediate_result("time" date)) bar WHERE ((foo.id)::double precision OPERATOR(pg_catalog.=) date_part('day'::text, bar."time")) ORDER BY bar."time" DESC, foo.id + id | time +--------------------------------------------------------------------- 3 | 03-03-2010 (1 row) @@ -96,11 +96,11 @@ FROM ORDER BY 2 DESC, 1 DESC LIMIT 3; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT foo."time", bar.id FROM (SELECT intermediate_result."time" FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test.id FROM subquery_and_partitioning.partitioning_test) bar WHERE (date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo."time", bar.id FROM (SELECT intermediate_result."time" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test.id FROM subquery_and_partitioning.partitioning_test) bar WHERE (date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 - time | id -------------+---- + time | id +--------------------------------------------------------------------- 03-03-2010 | 3 (1 row) @@ -126,11 +126,11 @@ FROM ORDER BY 2 DESC, 1 DESC LIMIT 3; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 10_1 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 -DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."time", bar.id, partitioning_test.id, partitioning_test.value_1, partitioning_test."time" FROM (SELECT intermediate_result."time" FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test_1.id FROM subquery_and_partitioning.partitioning_test partitioning_test_1) bar, subquery_and_partitioning.partitioning_test WHERE ((date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) AND (partitioning_test.id OPERATOR(pg_catalog.=) bar.id)) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo."time", bar.id, partitioning_test.id, partitioning_test.value_1, partitioning_test."time" FROM (SELECT intermediate_result."time" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test_1.id FROM subquery_and_partitioning.partitioning_test partitioning_test_1) bar, subquery_and_partitioning.partitioning_test WHERE ((date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) AND (partitioning_test.id OPERATOR(pg_catalog.=) bar.id)) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 
- time | id | id | value_1 | time -------------+----+----+---------+------------ + time | id | id | value_1 | time +--------------------------------------------------------------------- 03-03-2010 | 3 | 3 | 3 | 11-22-2017 (1 row) @@ -139,10 +139,10 @@ SELECT DISTINCT id FROM partitioning_test WHERE id IN (SELECT DISTINCT date_part('day', time) FROM partitioning_test); -DEBUG: generating subplan 12_1 for subquery SELECT DISTINCT date_part('day'::text, "time") AS date_part FROM subquery_and_partitioning.partitioning_test -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test WHERE ((id)::double precision OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.date_part FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(date_part double precision))) - id ----- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT date_part('day'::text, "time") AS date_part FROM subquery_and_partitioning.partitioning_test +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test WHERE ((id)::double precision OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.date_part FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(date_part double precision))) + id +--------------------------------------------------------------------- 3 (1 row) @@ -157,11 +157,11 @@ FROM ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_1 = bar.user_id; -DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT p1.value_1 FROM subquery_and_partitioning.partitioning_test p1, subquery_and_partitioning.partitioning_test p2 WHERE (p1.id OPERATOR(pg_catalog.=) p2.value_1) -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_1 OPERATOR(pg_catalog.=) bar.user_id) - count -------- +WHERE foo.value_1 = bar.user_id; +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT p1.value_1 FROM subquery_and_partitioning.partitioning_test p1, subquery_and_partitioning.partitioning_test p2 WHERE (p1.id OPERATOR(pg_catalog.=) p2.value_1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_1 OPERATOR(pg_catalog.=) bar.user_id) + count +--------------------------------------------------------------------- 47 (1 row) @@ -206,26 +206,26 @@ FROM SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; -DEBUG: generating subplan 16_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_partitioning.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT partitioning_test.value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT 
DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 17_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_partitioning.users_table_local -DEBUG: generating subplan 17_2 for CTE dist_cte: SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT partitioning_test.value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_partitioning.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT partitioning_test.value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_partitioning.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT partitioning_test.value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3))) DEBUG: push down of limit count: 3 -DEBUG: generating subplan 18_1 for subquery SELECT DISTINCT value_1 FROM public.users_table ORDER BY value_1 LIMIT 3 -DEBUG: generating subplan 18_2 for subquery SELECT DISTINCT value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0 -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM public.events_table, (SELECT intermediate_result.value_1 FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM 
read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_1 FROM public.users_table ORDER BY value_1 LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT value_1 FROM subquery_and_partitioning.partitioning_test OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT events_table.user_id FROM public.events_table, (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_1) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 16_2 for subquery SELECT DISTINCT events_table.user_id FROM subquery_and_partitioning.partitioning_test, public.events_table WHERE ((events_table.user_id OPERATOR(pg_catalog.=) partitioning_test.id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY events_table.user_id DESC LIMIT 5 -DEBUG: generating subplan 16_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT events_table.user_id FROM subquery_and_partitioning.partitioning_test, public.events_table WHERE ((events_table.user_id OPERATOR(pg_catalog.=) partitioning_test.id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY events_table.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 - cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- - 105 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 105 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 105 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 105 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | - 105 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | + cnt | user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 105 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 105 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 105 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 105 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | + 105 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | (5 rows) -- deep subquery, partitioned and non-partitioned tables together @@ -268,15 +268,15 @@ FROM ) as level_6, users_table WHERE users_table.user_id = level_6.min GROUP BY users_table.value_1 ) as bar; -DEBUG: generating subplan 23_1 for subquery SELECT count(*) AS cnt, value_1 FROM subquery_and_partitioning.partitioning_test GROUP BY value_1 -DEBUG: generating subplan 23_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_1 FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_1 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt -DEBUG: generating subplan 23_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg -DEBUG: generating subplan 23_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('23_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 23_5 for subquery SELECT min(partitioning_test.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM 
read_intermediate_result('23_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, subquery_and_partitioning.partitioning_test WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (partitioning_test.id)::numeric) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 23_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('23_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('23_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value_1 FROM subquery_and_partitioning.partitioning_test GROUP BY value_1 +DEBUG: generating subplan XXX_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_1 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(partitioning_test.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, subquery_and_partitioning.partitioning_test WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (partitioning_test.id)::numeric) GROUP BY level_5.avg_ev_type +DEBUG: generating subplan XXX_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar + count +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out index 3e8ae81cb..049206481 100644 --- 
a/src/test/regress/expected/subquery_prepared_statements.out +++ b/src/test/regress/expected/subquery_prepared_statements.out @@ -3,8 +3,8 @@ -- =================================================================== CREATE SCHEMA subquery_prepared_statements; SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); - run_command_on_workers -------------------------------------- + run_command_on_workers +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -12,45 +12,45 @@ SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); SET search_path TO subquery_prepared_statements, public; CREATE TYPE subquery_prepared_statements.xy AS (x int, y int); SET client_min_messages TO DEBUG1; -PREPARE subquery_prepare_without_param AS +PREPARE subquery_prepare_without_param AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -PREPARE subquery_prepare_param_on_partkey(int) AS +PREPARE subquery_prepare_param_on_partkey(int) AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE + FROM + users_table, events_table + WHERE users_table.user_id = events_table.user_id AND (users_table.user_id = $1 OR users_table.user_id = 2) AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -PREPARE subquery_prepare_param_non_partkey(int) AS +PREPARE subquery_prepare_param_non_partkey(int) AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type = $1 ORDER BY 1 DESC LIMIT 5 ) as foo @@ -58,10 +58,10 @@ FROM -- execute each test with 6 times EXECUTE subquery_prepare_without_param; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 1_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('1_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) 
events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -70,8 +70,8 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -80,8 +80,8 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -90,8 +90,8 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -100,8 +100,8 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -110,8 +110,8 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -120,8 +120,8 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery --------------------- + values_of_subquery +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -131,10 +131,10 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('3_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE 
((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -144,10 +144,10 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT DISTINCT values EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('5_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -157,10 +157,10 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT values EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY 
(ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('7_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -170,10 +170,10 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT values EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('9_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery 
+--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -183,10 +183,10 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT DISTINCT values EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 11_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('11_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -196,10 +196,10 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('14_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -209,10 +209,10 @@ DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 16_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('16_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -222,10 +222,10 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 18_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('18_1'::text, 'text'::citus_copy_format) 
intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -235,10 +235,10 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('20_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -248,10 +248,10 @@ DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 22_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 
-DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('22_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -261,10 +261,10 @@ DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 24_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('24_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -274,10 +274,10 @@ DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT DISTINCT value EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 -DEBUG: generating subplan 27_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE 
((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 -DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('27_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery --------------------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC + values_of_subquery +--------------------------------------------------------------------- (6,1) (5,1) (4,1) diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out index 7724d8712..fcb5d0eba 100644 --- a/src/test/regress/expected/subquery_view.out +++ b/src/test/regress/expected/subquery_view.out @@ -7,23 +7,23 @@ CREATE TABLE users_table_local AS SELECT * FROM users_table; CREATE TABLE events_table_local AS SELECT * FROM events_table; SET client_min_messages TO DEBUG1; CREATE VIEW view_without_subquery AS -SELECT - DISTINCT users_table.value_1 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND +SELECT + DISTINCT users_table.value_1 + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC; -SELECT - * -FROM - view_without_subquery +SELECT + * +FROM + view_without_subquery ORDER BY 1 DESC LIMIT 5; -DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DESC -DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) view_without_subquery ORDER BY value_1 DESC LIMIT 5 - value_1 ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) view_without_subquery ORDER BY value_1 DESC LIMIT 5 + value_1 
+--------------------------------------------------------------------- 5 4 3 @@ -32,25 +32,25 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (S (5 rows) CREATE VIEW view_without_subquery_second AS -SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND +SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5; -SELECT - * -FROM - view_without_subquery_second +SELECT + * +FROM + view_without_subquery_second ORDER BY 1; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) view_without_subquery_second ORDER BY user_id - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) view_without_subquery_second ORDER BY user_id + user_id +--------------------------------------------------------------------- 2 3 4 @@ -63,22 +63,22 @@ CREATE VIEW subquery_limit AS SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; SELECT * FROM subquery_limit ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) subquery_limit ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT foo.user_id FROM 
(SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) subquery_limit ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -87,25 +87,25 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT user_id FROM (S (5 rows) -- subqueries in FROM clause with GROUP BY non-distribution column should be recursively planned -CREATE VIEW subquery_non_p_key_group_by AS +CREATE VIEW subquery_non_p_key_group_by AS SELECT * FROM - (SELECT - DISTINCT users_table.value_1 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.value_1 + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 ) as foo ORDER BY 1 DESC; SELECT * FROM subquery_non_p_key_group_by ORDER BY 1 DESC; -DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 -DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT foo.value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY foo.value_1 DESC) subquery_non_p_key_group_by ORDER BY value_1 DESC - value_1 ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT foo.value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY foo.value_1 DESC) subquery_non_p_key_group_by ORDER BY value_1 DESC + value_1 +--------------------------------------------------------------------- 5 4 3 @@ -114,26 +114,26 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT value_1 FROM (S 0 (6 rows) -CREATE VIEW final_query_router AS +CREATE VIEW final_query_router AS SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.value_3 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) GROUP BY users_table.value_3 ORDER BY 1 DESC @@ -141,11 +141,11 @@ FROM WHERE foo.value_2 = bar.value_3 ORDER BY 2 DESC, 1; SELECT * FROM final_query_router ORDER BY 1; -DEBUG: generating subplan 11_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 
2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC -DEBUG: generating subplan 11_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC -DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT value_2, value_3 FROM (SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2) final_query_router ORDER BY value_2 - value_2 | value_3 ----------+--------- +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: generating subplan XXX_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value_2, value_3 FROM (SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2) final_query_router ORDER BY value_2 + value_2 | value_3 +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 2 @@ -158,43 +158,43 @@ CREATE VIEW final_query_realtime AS SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ORDER BY 1 DESC ) as bar WHERE foo.value_2 = bar.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; -SELECT - DISTINCT ON (users_table.value_2) users_table.value_2, time, value_3 -FROM +SELECT + DISTINCT ON (users_table.value_2) users_table.value_2, time, value_3 +FROM final_query_realtime, users_table -WHERE +WHERE users_table.user_id = final_query_realtime.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 3; -DEBUG: generating subplan 14_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE 
((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC +DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC DEBUG: push down of limit count: 3 -DEBUG: generating subplan 14_2 for subquery SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 -DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (users_table.value_2) users_table.value_2, users_table."time", users_table.value_3 FROM (SELECT intermediate_result.value_2, intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, user_id integer)) final_query_realtime, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) final_query_realtime.user_id) ORDER BY users_table.value_2 DESC, users_table."time" DESC, users_table.value_3 DESC LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (users_table.value_2) users_table.value_2, users_table."time", users_table.value_3 FROM (SELECT intermediate_result.value_2, intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, user_id integer)) final_query_realtime, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) final_query_realtime.user_id) ORDER BY users_table.value_2 DESC, users_table."time" DESC, users_table.value_3 DESC LIMIT 3 DEBUG: push down of limit count: 3 - value_2 | time | value_3 ----------+---------------------------------+--------- + value_2 | time | value_3 +--------------------------------------------------------------------- 5 | Thu Nov 23 16:28:38.455322 2017 | 4 4 | Thu Nov 23 10:22:39.468816 2017 | 3 3 | Thu Nov 23 15:55:08.493462 2017 | 3 @@ -203,18 +203,18 @@ DEBUG: push down of limit count: 3 CREATE VIEW subquery_in_where AS SELECT DISTINCT user_id FROM users_table -WHERE +WHERE user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5); -SELECT - * -FROM +SELECT + * 
+FROM subquery_in_where ORDER BY 1 DESC; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 17_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) subquery_in_where ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) subquery_in_where ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 4 3 2 @@ -222,23 +222,23 @@ DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id FROM ( (4 rows) -- subquery in FROM -> FROM -> WHERE should be replaced due to LIMIT -CREATE VIEW subquery_from_from_where AS +CREATE VIEW subquery_from_from_where AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( - SELECT + SELECT u.user_id, e.event_type::text AS event, e.time - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -247,85 +247,85 @@ FROM ( ) t GROUP BY user_id ) q; -SELECT - * -FROM +SELECT + * +FROM subquery_from_from_where -ORDER BY +ORDER BY 2 DESC, 1; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 19_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 -DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM 
(SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY array_length DESC, user_id - user_id | array_length ----------+-------------- +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY array_length DESC, user_id + user_id | array_length +--------------------------------------------------------------------- 5 | 364 (1 row) --- subquery in FROM -> FROM -> FROM should be replaced if +-- subquery in FROM -> FROM -> FROM should be replaced if -- it contains onle local tables -CREATE VIEW subquery_from_from_where_local_table AS -SELECT - DISTINCT user_id -FROM +CREATE VIEW subquery_from_from_where_local_table AS +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT event_type, user_id FROM - (SELECT event_type, users_table.user_id FROM users_table, + (SELECT event_type, users_table.user_id FROM users_table, (SELECT user_id, event_type FROM events_table_local WHERE value_2 < 3 OFFSET 3) as foo WHERE foo.user_id = users_table.user_id ) bar ) as baz WHERE baz.user_id = users_table.user_id ) as sub1; -SELECT - * -FROM +SELECT + * +FROM subquery_from_from_where -ORDER BY 1 DESC +ORDER BY 1 DESC LIMIT 3; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 21_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type 
OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 -DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY user_id DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id | array_length ----------+-------------- + user_id | array_length +--------------------------------------------------------------------- 5 | 364 (1 row) SET citus.enable_repartition_joins to ON; CREATE VIEW repartition_view AS -SELECT - count(*) +SELECT + count(*) FROM ( SELECT DISTINCT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND users_table.user_id < 2 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_2 = bar.user_id; -SELECT - * -FROM +WHERE foo.value_2 = bar.user_id; +SELECT + * +FROM repartition_view; -DEBUG: generating subplan 23_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) -DEBUG: generating subplan 23_2 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM 
public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) -DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) repartition_view - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) +DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) repartition_view + count +--------------------------------------------------------------------- 58 (1 row) CREATE VIEW all_executors_view AS -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table OFFSET 0 ) as bar, @@ -335,87 +335,87 @@ FROM ( SELECT user_id FROM users_table_local WHERE user_id = 2 ) baw -WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; -SELECT - * -FROM +WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; +SELECT + * +FROM all_executors_view; -DEBUG: generating subplan 26_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 -DEBUG: generating subplan 26_2 for subquery SELECT user_id FROM public.users_table OFFSET 0 -DEBUG: generating subplan 26_3 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) -DEBUG: generating subplan 26_4 for subquery SELECT user_id FROM subquery_view.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) -DEBUG: generating subplan 26_5 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) -DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('26_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) all_executors_view - count -------- +DEBUG: generating subplan XXX_1 
for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table OFFSET 0 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) +DEBUG: generating subplan XXX_4 for subquery SELECT user_id FROM subquery_view.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) +DEBUG: generating subplan XXX_5 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) all_executors_view + count +--------------------------------------------------------------------- 0 (1 row) SET citus.enable_repartition_joins to OFF; -- the same query, but this time the CTEs also live inside a subquery -CREATE VIEW subquery_and_ctes AS -SELECT - * -FROM +CREATE VIEW subquery_and_ctes AS +SELECT + * +FROM ( WITH cte AS ( WITH local_cte AS ( SELECT * FROM users_table_local ), dist_cte AS ( - SELECT + SELECT user_id - FROM - events_table, + FROM + events_table, (SELECT DISTINCT value_2 FROM users_table OFFSET 0) as foo - WHERE + WHERE events_table.user_id = foo.value_2 AND events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3) ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT +SELECT count(*) as cnt -FROM +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM + (SELECT + DISTINCT users_table.user_id + FROM users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; -DEBUG: generating subplan 31_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_view.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT users_table.value_2 FROM public.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table 
ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 32_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_view.users_table_local -DEBUG: generating subplan 32_2 for CTE dist_cte: SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT users_table.value_2 FROM public.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3))) +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_view.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT users_table.value_2 FROM public.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_view.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT events_table.user_id FROM public.events_table, (SELECT DISTINCT users_table.value_2 FROM public.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM public.users_table ORDER BY users_table.value_1 LIMIT 3))) DEBUG: push down of limit count: 3 -DEBUG: generating subplan 33_1 for subquery SELECT DISTINCT value_1 FROM public.users_table ORDER BY value_1 LIMIT 3 -DEBUG: generating subplan 33_2 for subquery SELECT DISTINCT value_2 FROM public.users_table OFFSET 0 -DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM public.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) -DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 
for subquery SELECT DISTINCT value_1 FROM public.users_table ORDER BY value_1 LIMIT 3 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT value_2 FROM public.users_table OFFSET 0 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT events_table.user_id FROM public.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)))) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: push down of limit count: 5 -DEBUG: generating subplan 31_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 -DEBUG: generating subplan 31_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('31_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id 
OPERATOR(pg_catalog.=) cte.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 - cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- - 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | - 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | + cnt | user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | + 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | (5 rows) -CREATE VIEW subquery_and_ctes_second AS -SELECT time, event_type, value_2, value_3 FROM +CREATE VIEW subquery_and_ctes_second AS +SELECT time, event_type, value_2, value_3 FROM ( WITH cte AS ( WITH local_cte AS ( @@ -428,29 +428,29 @@ SELECT time, event_type, value_2, value_3 FROM ) SELECT DISTINCT cte.user_id FROM users_table, cte - WHERE + WHERE users_table.user_id = cte.user_id AND - users_table.user_id IN + users_table.user_id IN (WITH cte_in_where AS (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5) SELECT * FROM cte_in_where) ORDER BY 1 DESC - ) as foo, - events_table - WHERE + ) as foo, + events_table + WHERE foo.user_id = events_table.value_2; SELECT * FROM subquery_and_ctes_second -ORDER BY 3 DESC, 2 DESC, 1 DESC +ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; -DEBUG: generating subplan 38_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_view.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 39_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_view.users_table_local -DEBUG: generating subplan 39_2 for CTE dist_cte: SELECT user_id FROM public.events_table -DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN 
(SELECT intermediate_result.user_id FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) -DEBUG: generating subplan 38_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 +DEBUG: generating subplan XXX_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_view.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_view.users_table_local +DEBUG: generating subplan XXX_2 for CTE dist_cte: SELECT user_id FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) +DEBUG: generating subplan XXX_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: push down of limit count: 5 -DEBUG: generating subplan 38_3 for subquery SELECT DISTINCT cte.user_id FROM public.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT "time", event_type, value_2, value_3 FROM (SELECT events_table."time", events_table.event_type, events_table.value_2, events_table.value_3 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2)) subquery_and_ctes_second ORDER BY value_2 DESC, event_type DESC, "time" DESC LIMIT 5 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT cte.user_id FROM public.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND 
(users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT "time", event_type, value_2, value_3 FROM (SELECT events_table."time", events_table.event_type, events_table.value_2, events_table.value_3 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2)) subquery_and_ctes_second ORDER BY value_2 DESC, event_type DESC, "time" DESC LIMIT 5 DEBUG: push down of limit count: 5 - time | event_type | value_2 | value_3 ----------------------------------+------------+---------+--------- + time | event_type | value_2 | value_3 +--------------------------------------------------------------------- Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 Wed Nov 22 21:24:22.849224 2017 | 5 | 4 | 1 Wed Nov 22 21:05:25.194441 2017 | 5 | 4 | 1 @@ -462,27 +462,27 @@ CREATE VIEW deep_subquery AS SELECT count(*) FROM ( - SELECT avg(min) FROM + SELECT avg(min) FROM ( SELECT min(users_table.value_1) FROM ( - SELECT avg(event_type) as avg_ev_type FROM + SELECT avg(event_type) as avg_ev_type FROM ( - SELECT - max(value_1) as mx_val_1 + SELECT + max(value_1) as mx_val_1 FROM ( - SELECT + SELECT avg(event_type) as avg FROM ( - SELECT - cnt - FROM + SELECT + cnt + FROM (SELECT count(*) as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table - WHERE + WHERE users_table.user_id = level_1.cnt ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt + WHERE events_table.user_id = level_2.cnt GROUP BY level_2.cnt ) as level_3, users_table WHERE user_id = level_3.avg @@ -491,27 +491,27 @@ FROM WHERE level_4.mx_val_1 = events_table.user_id GROUP BY level_4.mx_val_1 ) as level_5, users_table - WHERE + WHERE level_5.avg_ev_type = users_table.user_id - GROUP BY + GROUP BY level_5.avg_ev_type ) as level_6, users_table WHERE users_table.user_id = level_6.min GROUP BY users_table.value_1 ) as bar; -SELECT - * -FROM +SELECT + * +FROM deep_subquery; -DEBUG: generating subplan 43_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 -DEBUG: generating subplan 43_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt -DEBUG: generating subplan 43_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg -DEBUG: generating subplan 43_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('43_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 
integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 -DEBUG: generating subplan 43_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('43_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type -DEBUG: generating subplan 43_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('43_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 -DEBUG: generating subplan 43_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('43_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar -DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('43_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) deep_subquery - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 +DEBUG: generating subplan XXX_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt +DEBUG: generating subplan XXX_3 for subquery SELECT max(users_table.value_1) AS mx_val_1 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) level_3, public.users_table WHERE ((users_table.user_id)::numeric OPERATOR(pg_catalog.=) level_3.avg) GROUP BY level_3.avg +DEBUG: generating subplan XXX_4 for subquery SELECT avg(events_table.event_type) AS avg_ev_type FROM (SELECT intermediate_result.mx_val_1 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(mx_val_1 integer)) level_4, public.events_table WHERE (level_4.mx_val_1 OPERATOR(pg_catalog.=) events_table.user_id) GROUP BY level_4.mx_val_1 +DEBUG: generating subplan XXX_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type +DEBUG: generating subplan XXX_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 +DEBUG: generating subplan XXX_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM 
read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) deep_subquery + count +--------------------------------------------------------------------- 0 (1 row) @@ -519,24 +519,24 @@ CREATE VIEW result_of_view_is_also_recursively_planned AS SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -SELECT +SELECT * FROM - (SELECT - * + (SELECT + * FROM - result_of_view_is_also_recursively_planned, events_table - WHERE + result_of_view_is_also_recursively_planned, events_table + WHERE events_table.value_2 = result_of_view_is_also_recursively_planned.user_id ORDER BY time DESC LIMIT 5 @@ -544,17 +544,17 @@ FROM ) as foo ORDER BY time DESC LIMIT 5; DEBUG: push down of limit count: 5 -DEBUG: generating subplan 51_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: push down of limit count: 9 -DEBUG: generating subplan 51_2 for subquery SELECT result_of_view_is_also_recursively_planned.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('51_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) result_of_view_is_also_recursively_planned, public.events_table WHERE (events_table.value_2 OPERATOR(pg_catalog.=) result_of_view_is_also_recursively_planned.user_id) ORDER BY events_table."time" DESC OFFSET 4 LIMIT 5 -DEBUG: Plan 51 query after replacing subqueries and CTEs: SELECT user_id, user_id_1 AS user_id, "time", event_type, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_1 AS user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('51_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_1 integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) foo(user_id, user_id_1, "time", event_type, value_2, value_3, value_4) ORDER BY "time" DESC LIMIT 5 - user_id | user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------+---------------------------------+------------+---------+---------+--------- - 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | - 2 | 5 | Thu 
Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | - 4 | 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | - 3 | 2 | Thu Nov 23 14:02:47.738901 2017 | 1 | 3 | 2 | - 3 | 6 | Thu Nov 23 14:00:13.20013 2017 | 3 | 3 | 3 | +DEBUG: generating subplan XXX_2 for subquery SELECT result_of_view_is_also_recursively_planned.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) result_of_view_is_also_recursively_planned, public.events_table WHERE (events_table.value_2 OPERATOR(pg_catalog.=) result_of_view_is_also_recursively_planned.user_id) ORDER BY events_table."time" DESC OFFSET 4 LIMIT 5 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, user_id_1 AS user_id, "time", event_type, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_1 AS user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_1 integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) foo(user_id, user_id_1, "time", event_type, value_2, value_3, value_4) ORDER BY "time" DESC LIMIT 5 + user_id | user_id | time | event_type | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | + 2 | 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | + 4 | 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | + 3 | 2 | Thu Nov 23 14:02:47.738901 2017 | 1 | 3 | 2 | + 3 | 6 | Thu Nov 23 14:00:13.20013 2017 | 3 | 3 | 3 | (5 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/task_tracker_assign_task.out b/src/test/regress/expected/task_tracker_assign_task.out index d9dff929b..85085e848 100644 --- a/src/test/regress/expected/task_tracker_assign_task.out +++ b/src/test/regress/expected/task_tracker_assign_task.out @@ -14,76 +14,76 @@ SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_101101'''); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) -- After assigning the two tasks, we wait for them to make progress. Note that -- these tasks get scheduled and run asynchronously, so if the sleep interval is -- not enough, the regression tests may fail on an overloaded box. 
SELECT pg_sleep(3.0); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_task_status(:JobId, :SimpleTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 5 (1 row) COPY :SimpleTaskTable FROM 'base/pgsql_job_cache/job_401010/task_101101'; SELECT COUNT(*) FROM :SimpleTaskTable; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :SelectAll FROM :SimpleTaskTable EXCEPT ALL :SelectAll FROM lineitem ) diff; - diff_lhs ----------- + diff_lhs +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL :SelectAll FROM :SimpleTaskTable ) diff; - diff_rhs ----------- + diff_rhs +--------------------------------------------------------------------- 0 (1 row) -- We now reassign the recoverable task with a good query string. This updates -- the task's query string, and reschedules the updated task for execution. SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :GoodQueryString); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT pg_sleep(2.0); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/task_tracker_cleanup_job.out b/src/test/regress/expected/task_tracker_cleanup_job.out index 4b5ade859..d8156de13 100644 --- a/src/test/regress/expected/task_tracker_cleanup_job.out +++ b/src/test/regress/expected/task_tracker_cleanup_job.out @@ -7,26 +7,26 @@ SET citus.next_shard_id TO 1060000; \set RunningTaskId 801108 -- Test worker_cleanup_job_schema_cache SELECT * FROM task_tracker_assign_task(2, 2, ''); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) SELECT worker_cleanup_job_schema_cache(); - worker_cleanup_job_schema_cache ---------------------------------- - + worker_cleanup_job_schema_cache +--------------------------------------------------------------------- + (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -35,60 +35,60 @@ SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002' SELECT task_tracker_assign_task(:JobId, :CompletedTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_801107'''); - task_tracker_assign_task --------------------------- - + 
task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_assign_task(:JobId, :RunningTaskId, 'SELECT pg_sleep(100)'); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT pg_sleep(2.0); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RunningTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 3 (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); - isdir -------- + isdir +--------------------------------------------------------------------- f (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); - isdir -------- + isdir +--------------------------------------------------------------------- t (1 row) -- We now clean up all tasks for this job id. As a result, shared hash entries, -- files, and connections associated with these tasks should all be cleaned up. SELECT task_tracker_cleanup_job(:JobId); - task_tracker_cleanup_job --------------------------- - + task_tracker_cleanup_job +--------------------------------------------------------------------- + (1 row) SELECT pg_sleep(1.0); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); @@ -103,8 +103,8 @@ SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); ERROR: could not stat file "base/pgsql_job_cache/job_401010": No such file or directory -- Also clean up worker_cleanup_job_schema_cache job SELECT task_tracker_cleanup_job(2); - task_tracker_cleanup_job --------------------------- - + task_tracker_cleanup_job +--------------------------------------------------------------------- + (1 row) diff --git a/src/test/regress/expected/task_tracker_partition_task.out b/src/test/regress/expected/task_tracker_partition_task.out index 6d30e3d48..0bc0b1389 100644 --- a/src/test/regress/expected/task_tracker_partition_task.out +++ b/src/test/regress/expected/task_tracker_partition_task.out @@ -22,20 +22,20 @@ SELECT task_tracker_assign_task(:JobId, :PartitionTaskId, 'SELECT worker_range_partition_table(' '401010, 801106, ''SELECT * FROM lineitem'', ' '''l_orderkey'', 20, ARRAY[1000, 3000]::_int8)'); - task_tracker_assign_task --------------------------- - + task_tracker_assign_task +--------------------------------------------------------------------- + (1 row) SELECT pg_sleep(4.0); - pg_sleep ----------- - + pg_sleep +--------------------------------------------------------------------- + (1 row) SELECT task_tracker_task_status(:JobId, :PartitionTaskId); - task_tracker_task_status --------------------------- + task_tracker_task_status +--------------------------------------------------------------------- 6 (1 row) @@ -43,14 +43,14 @@ COPY :TablePart00 FROM :'Table_File_00'; COPY :TablePart01 FROM :'Table_File_01'; COPY :TablePart02 FROM :'Table_File_02'; SELECT COUNT(*) FROM :TablePart00; - count -------- + count 
+--------------------------------------------------------------------- 1004 (1 row) SELECT COUNT(*) FROM :TablePart02; - count -------- + count +--------------------------------------------------------------------- 8970 (1 row) @@ -59,8 +59,8 @@ SELECT COUNT(*) FROM :TablePart02; SELECT COUNT(*) AS diff_lhs_00 FROM ( :SelectAll FROM :TablePart00 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -68,24 +68,24 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :SelectAll FROM :TablePart01 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :SelectAll FROM :TablePart02 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 EXCEPT ALL :SelectAll FROM :TablePart00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -93,16 +93,16 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 EXCEPT ALL :SelectAll FROM :TablePart01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 EXCEPT ALL :SelectAll FROM :TablePart02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index 4cd980fcf..88e60c649 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -1,40 +1,40 @@ SET search_path TO upgrade_basic, public, pg_catalog; BEGIN; SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename; - schemaname | tablename | indexname | tablespace | indexdef ----------------+-----------+-----------+------------+----------------------------------------------------------------- + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- upgrade_basic | r | r_pkey | | CREATE UNIQUE INDEX r_pkey ON upgrade_basic.r USING btree (a) upgrade_basic | t | t_a_idx | | CREATE INDEX t_a_idx ON upgrade_basic.t USING hash (a) upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a) (3 rows) SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node; - ?column? ----------- + ?column? 
+--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation; - ?column? ----------- + ?column? +--------------------------------------------------------------------- t (1 row) @@ -50,8 +50,8 @@ SELECT sequence_name FROM information_schema.sequences 'pg_dist_node_nodeid_seq', 'pg_dist_colocationid_seq' ); - sequence_name ---------------- + sequence_name +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid FROM pg_dist_partition @@ -61,8 +61,8 @@ SELECT logicalrelid FROM pg_dist_partition refobjid=(select oid FROM pg_extension WHERE extname = 'citus') AND relnamespace='upgrade_basic'::regnamespace ORDER BY logicalrelid; - logicalrelid --------------- + logicalrelid +--------------------------------------------------------------------- t tp t_ab @@ -79,8 +79,8 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 relnamespace='upgrade_basic'::regnamespace AND tgname LIKE 'truncate_trigger_%' ORDER BY tgrelid::regclass; - tgrelid | tgfoid | tgisinternal | tgenabled | tgtype -----------+------------------------+--------------+-----------+---------- + tgrelid | tgfoid | tgisinternal | tgenabled | tgtype +--------------------------------------------------------------------- t | citus_truncate_trigger | t | O | 00100000 tp | citus_truncate_trigger | t | O | 00100000 t_ab | citus_truncate_trigger | t | O | 00100000 @@ -90,8 +90,8 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 (6 rows) SELECT * FROM t ORDER BY a; - a ---- + a +--------------------------------------------------------------------- 1 2 3 @@ -100,31 +100,31 @@ SELECT * FROM t ORDER BY a; (5 rows) SELECT * FROM t WHERE a = 1; - a ---- + a +--------------------------------------------------------------------- 1 (1 row) INSERT INTO t SELECT * FROM generate_series(10, 15); EXPLAIN (COSTS FALSE) SELECT * from t; - QUERY PLAN ---------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 32 Tasks Shown: One of 32 -> Task - Node: host=localhost port=57636 dbname=postgres + Node: host=localhost port=xxxxx dbname=postgres -> Seq Scan on t_102008 t (6 rows) EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; - QUERY PLAN ---------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57637 dbname=postgres + Node: host=localhost port=xxxxx dbname=postgres -> Bitmap Heap Scan on t_102009 t Recheck Cond: (a = 1) -> Bitmap Index Scan on t_a_idx_102009 @@ -132,22 +132,22 @@ EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; (9 rows) SELECT * FROM t WHERE a = 10; - a ----- + a +--------------------------------------------------------------------- 10 (1 row) SELECT * FROM t WHERE a = 11; - a ----- + a +--------------------------------------------------------------------- 11 (1 row) COPY t FROM PROGRAM 'echo 20 && echo 21 && echo 22 && echo 23 && echo 24' WITH CSV; ALTER TABLE t ADD COLUMN b int DEFAULT 10; SELECT * FROM t ORDER BY a; - a | b -----+---- + a 
| b +--------------------------------------------------------------------- 1 | 10 2 | 10 3 | 10 @@ -168,38 +168,38 @@ SELECT * FROM t ORDER BY a; TRUNCATE TABLE t; SELECT * FROM T; - a | b ----+--- + a | b +--------------------------------------------------------------------- (0 rows) DROP TABLE t; \d t -- verify that the table whose column is dropped before a pg_upgrade still works as expected. SELECT * FROM t_ab ORDER BY b; - b ----- + b +--------------------------------------------------------------------- 11 22 33 (3 rows) SELECT * FROM t_ab WHERE b = 11; - b ----- + b +--------------------------------------------------------------------- 11 (1 row) SELECT * FROM t_ab WHERE b = 22; - b ----- + b +--------------------------------------------------------------------- 22 (1 row) -- Check that we can create a distributed table out of a table that was created -- before the upgrade SELECT * FROM t2 ORDER BY a; - a | b ----+---- + a | b +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -207,14 +207,14 @@ SELECT * FROM t2 ORDER BY a; SELECT create_distributed_table('t2', 'a'); NOTICE: Copying data from local table... - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT * FROM t2 ORDER BY a; - a | b ----+---- + a | b +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -224,8 +224,8 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM r ORDER BY a; - a ---- + a +--------------------------------------------------------------------- 1 2 3 @@ -234,8 +234,8 @@ SELECT * FROM r ORDER BY a; (5 rows) SELECT * FROM tr ORDER BY pk; - pk | a -----+--- + pk | a +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -245,8 +245,8 @@ SELECT * FROM tr ORDER BY pk; DELETE FROM r where a = 1; SELECT * FROM r ORDER BY a; - a ---- + a +--------------------------------------------------------------------- 2 3 4 @@ -254,8 +254,8 @@ SELECT * FROM r ORDER BY a; (4 rows) SELECT * FROM tr ORDER BY pk; - pk | a -----+--- + pk | a +--------------------------------------------------------------------- 2 | 2 3 | 3 4 | 4 @@ -264,8 +264,8 @@ SELECT * FROM tr ORDER BY pk; UPDATE r SET a = 30 WHERE a = 3; SELECT * FROM r ORDER BY a; - a ----- + a +--------------------------------------------------------------------- 2 4 5 @@ -273,8 +273,8 @@ SELECT * FROM r ORDER BY a; (4 rows) SELECT * FROM tr ORDER BY pk; - pk | a -----+---- + pk | a +--------------------------------------------------------------------- 2 | 2 3 | 30 4 | 4 @@ -284,17 +284,17 @@ SELECT * FROM tr ORDER BY pk; -- Check we can still create distributed tables after upgrade CREATE TABLE t3(a int, b int); SELECT create_distributed_table('t3', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t3 VALUES (1, 11); INSERT INTO t3 VALUES (2, 22); INSERT INTO t3 VALUES (3, 33); SELECT * FROM t3 ORDER BY a; - a | b ----+---- + a | b +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -303,15 +303,15 @@ SELECT * FROM t3 ORDER BY a; SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; - shardminvalue | shardmaxvalue 
----------------+--------------- + shardminvalue | shardmaxvalue +--------------------------------------------------------------------- 1 | 3 5 | 7 (2 rows) SELECT * FROM t_append ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -324,16 +324,16 @@ SELECT * FROM t_append ORDER BY id; SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; - shardminvalue | shardmaxvalue ----------------+--------------- + shardminvalue | shardmaxvalue +--------------------------------------------------------------------- 1 | 3 5 | 7 9 | 11 (3 rows) SELECT * FROM t_append ORDER BY id; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index ef63212cf..1555d4f81 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -3,17 +3,17 @@ SET search_path TO upgrade_basic, public; CREATE TABLE t(a int); CREATE INDEX ON t USING HASH (a); SELECT create_distributed_table('t', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t SELECT * FROM generate_series(1, 5); CREATE TABLE tp(a int PRIMARY KEY); SELECT create_distributed_table('tp', 'a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tp SELECT * FROM generate_series(1, 5); @@ -22,9 +22,9 @@ INSERT INTO tp SELECT * FROM generate_series(1, 5); -- distribution column. The index information is in partkey column of pg_dist_partition table. 
CREATE TABLE t_ab(a int, b int); SELECT create_distributed_table('t_ab', 'b'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t_ab VALUES (1, 11); @@ -38,25 +38,25 @@ ALTER TABLE t_ab DROP a; -- Check that basic reference tables work CREATE TABLE r(a int PRIMARY KEY); SELECT create_reference_table('r'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO r SELECT * FROM generate_series(1, 5); CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('tr', 'pk'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; CREATE TABLE t_append(id int, value_1 int); SELECT master_create_distributed_table('t_append', 'id', 'append'); - master_create_distributed_table ---------------------------------- - + master_create_distributed_table +--------------------------------------------------------------------- + (1 row) \copy t_append FROM STDIN DELIMITER ',' diff --git a/src/test/regress/expected/upgrade_distributed_function_after.out b/src/test/regress/expected/upgrade_distributed_function_after.out index 21d8456d3..9b11d029d 100644 --- a/src/test/regress/expected/upgrade_distributed_function_after.out +++ b/src/test/regress/expected/upgrade_distributed_function_after.out @@ -2,24 +2,24 @@ SET search_path TO upgrade_distributed_function_before, public; -- make sure that the metadata synced SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; bool_and - ---------- +--------------------------------------------------------------------- t (1 row) - + SET client_min_messages TO DEBUG1; -- these are simple select functions, so doesn't have any -- side effects, safe to be called without BEGIN;..;ROLLBACK; SELECT count_values(11); DEBUG: pushing down the function call count_values - --------------------------------------------------------------------- - 1 - (1 row) - -SELECT count_values(12); -DEBUG: pushing down the function call - count_values - --------------------------------------------------------------------- +--------------------------------------------------------------------- + 1 + (1 row) + +SELECT count_values(12); +DEBUG: pushing down the function call + count_values +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/upgrade_distributed_function_before.out b/src/test/regress/expected/upgrade_distributed_function_before.out index e82f86ba7..fb6440dfa 100644 --- a/src/test/regress/expected/upgrade_distributed_function_before.out +++ b/src/test/regress/expected/upgrade_distributed_function_before.out @@ -4,9 +4,9 @@ SET citus.replication_model TO streaming; SET citus.shard_replication_factor TO 1; CREATE TABLE t1 (a int PRIMARY KEY, b int); SELECT create_distributed_table('t1','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO t1 VALUES (11), (12); @@ -21,36 +21,36 @@ $$ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('count_values(int)', '$1', colocate_with:='t1'); - 
create_distributed_function ------------------------------ - + create_distributed_function +--------------------------------------------------------------------- + (1 row) -- make sure that the metadata synced before running the queries SELECT wait_until_metadata_sync(); - wait_until_metadata_sync --------------------------- - + wait_until_metadata_sync +--------------------------------------------------------------------- + (1 row) SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_and ----------- + bool_and +--------------------------------------------------------------------- t (1 row) SET client_min_messages TO DEBUG1; SELECT count_values(11); DEBUG: pushing down the function call - count_values --------------- + count_values +--------------------------------------------------------------------- 1 (1 row) SELECT count_values(12); DEBUG: pushing down the function call - count_values --------------- + count_values +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_after.out b/src/test/regress/expected/upgrade_rebalance_strategy_after.out index 4bfe9ed4c..36dd71b6c 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_after.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_after.out @@ -1,6 +1,6 @@ SELECT * FROM pg_catalog.pg_dist_rebalance_strategy ORDER BY name; - name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold ------------------+------------------+-----------------------------------------+---------------------------------------------------+------------------------------------------+-------------------+------------------- + name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold +--------------------------------------------------------------------- by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out index 327d05ec2..0a12b1d60 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out @@ -24,15 +24,15 @@ SELECT citus_add_rebalance_strategy( 0.5, 0.2 ); - citus_add_rebalance_strategy ------------------------------- - + citus_add_rebalance_strategy +--------------------------------------------------------------------- + (1 row) SELECT citus_set_default_rebalance_strategy('custom_strategy'); - citus_set_default_rebalance_strategy --------------------------------------- - + citus_set_default_rebalance_strategy +--------------------------------------------------------------------- + (1 row) ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger; diff --git a/src/test/regress/expected/upgrade_ref2ref_after.out b/src/test/regress/expected/upgrade_ref2ref_after.out index af339e44f..ec8dc5d92 100644 --- 
a/src/test/regress/expected/upgrade_ref2ref_after.out +++ b/src/test/regress/expected/upgrade_ref2ref_after.out @@ -2,8 +2,8 @@ SET search_path TO upgrade_ref2ref, public; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM ref_table_1 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -12,8 +12,8 @@ SELECT * FROM ref_table_1 ORDER BY id; (5 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -22,8 +22,8 @@ SELECT * FROM ref_table_2 ORDER BY id; (5 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -32,8 +32,8 @@ SELECT * FROM ref_table_3 ORDER BY id; (5 rows) SELECT * FROM dist_table ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -43,8 +43,8 @@ SELECT * FROM dist_table ORDER BY id; UPDATE ref_table_1 SET id = 10 where id = 1; SELECT * FROM ref_table_1 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 2 | 2 3 | 3 4 | 4 @@ -53,8 +53,8 @@ SELECT * FROM ref_table_1 ORDER BY id; (5 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 10 2 | 2 3 | 3 @@ -63,8 +63,8 @@ SELECT * FROM ref_table_2 ORDER BY id; (5 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -73,8 +73,8 @@ SELECT * FROM ref_table_3 ORDER BY id; (5 rows) SELECT * FROM dist_table ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -84,8 +84,8 @@ SELECT * FROM dist_table ORDER BY id; DELETE FROM ref_table_1 WHERE id = 4; SELECT * FROM ref_table_1 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 2 | 2 3 | 3 5 | 5 @@ -93,8 +93,8 @@ SELECT * FROM ref_table_1 ORDER BY id; (4 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 10 2 | 2 3 | 3 @@ -102,8 +102,8 @@ SELECT * FROM ref_table_2 ORDER BY id; (4 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -111,8 +111,8 @@ SELECT * FROM ref_table_3 ORDER BY id; (4 rows) SELECT * FROM dist_table ORDER BY id; - id | value -----+------- + id | value +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 diff --git a/src/test/regress/expected/upgrade_ref2ref_before.out b/src/test/regress/expected/upgrade_ref2ref_before.out index 46df5f0ec..ea91f9566 100644 --- a/src/test/regress/expected/upgrade_ref2ref_before.out +++ b/src/test/regress/expected/upgrade_ref2ref_before.out @@ -2,30 +2,30 @@ CREATE SCHEMA upgrade_ref2ref; SET search_path TO upgrade_ref2ref, public; CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); - create_reference_table 
------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_3'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('dist_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO ref_table_1 SELECT c, c FROM generate_series(1, 5) as c; diff --git a/src/test/regress/expected/upgrade_type_after.out b/src/test/regress/expected/upgrade_type_after.out index 2f353522e..a9a96c490 100644 --- a/src/test/regress/expected/upgrade_type_after.out +++ b/src/test/regress/expected/upgrade_type_after.out @@ -4,8 +4,8 @@ SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; -- test distributed type INSERT INTO tt VALUES (1, (2,3)::type1); SELECT * FROM tt; - a | b ----+------- + a | b +--------------------------------------------------------------------- 1 | (2,3) 2 | (3,4) (2 rows) diff --git a/src/test/regress/expected/upgrade_type_before.out b/src/test/regress/expected/upgrade_type_before.out index b13bfa4b3..97b613748 100644 --- a/src/test/regress/expected/upgrade_type_before.out +++ b/src/test/regress/expected/upgrade_type_before.out @@ -3,9 +3,9 @@ SET search_path TO upgrade_type, public; CREATE TYPE type1 AS (a int, b int); CREATE TABLE tt (a int PRIMARY KEY, b type1); SELECT create_distributed_table('tt','a'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO tt VALUES (2, (3,4)::type1); diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index a8e1e19dc..a3f75a2c5 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -55,23 +55,23 @@ CREATE VIEW constraint_validations AS AND contype = 'c'; CREATE TABLE referenced_table (id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE referencing_table (id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE constrained_table (id int, constrained_column int); SELECT create_distributed_table('constrained_table', 'constrained_column'); - create_distributed_table --------------------------- - + create_distributed_table 
+--------------------------------------------------------------------- + (1 row) -- The two constraint types that are allowed to be NOT VALID @@ -111,16 +111,16 @@ ALTER TABLE constrained_table SELECT * FROM constraint_validations ORDER BY 1, 2; - Constraint | Validated? -------------------------+------------ + Constraint | Validated? +--------------------------------------------------------------------- validatable_constraint | t (1 row) SELECT * FROM constraint_validations_in_workers ORDER BY 1, 2; - name | validated ---------------------------------+----------- + name | validated +--------------------------------------------------------------------- validatable_constraint_8000009 | t validatable_constraint_8000010 | t validatable_constraint_8000011 | t diff --git a/src/test/regress/expected/window_functions.out b/src/test/regress/expected/window_functions.out index bbd0e9555..bc5d45914 100644 --- a/src/test/regress/expected/window_functions.out +++ b/src/test/regress/expected/window_functions.out @@ -11,8 +11,8 @@ FROM ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5; - user_id | count | rank ----------+-------+------ + user_id | count | rank +--------------------------------------------------------------------- 6 | 10 | 1 6 | 10 | 1 6 | 10 | 1 @@ -30,8 +30,8 @@ GROUP BY 1 ORDER BY 2 DESC NULLS LAST, 1 DESC; - user_id | avg ----------+------------------ + user_id | avg +--------------------------------------------------------------------- 2 | 3 4 | 2.82608695652174 3 | 2.70588235294118 @@ -57,8 +57,8 @@ GROUP BY 1, value_1 ORDER BY 2 DESC, 1; - user_id | max ----------+----- + user_id | max +--------------------------------------------------------------------- 1 | 5 3 | 5 3 | 5 @@ -110,8 +110,8 @@ ORDER BY 1, 2 LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 13 1 | 13 1 | 13 @@ -134,8 +134,8 @@ GROUP BY ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; - user_id | value_1 | sum ----------+---------+----- + user_id | value_1 | sum +--------------------------------------------------------------------- 5 | 5 | 15 4 | 5 | 15 3 | 5 | 15 @@ -159,8 +159,8 @@ FROM ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank ----------+------ + user_id | rank +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ -185,8 +185,8 @@ GROUP BY HAVING count(*) > 4 ORDER BY 2 DESC, 1; - user_id | rank ----------+------ + user_id | rank +--------------------------------------------------------------------- 4 | 2 5 | 2 2 | 1 @@ -208,8 +208,8 @@ WINDOW ORDER BY rnk DESC, 1 DESC LIMIT 10; - user_id | rnk ----------+----- + user_id | rnk +--------------------------------------------------------------------- 3 | 121 5 | 118 2 | 116 @@ -234,8 +234,8 @@ WINDOW ORDER BY rnk DESC, 1 DESC LIMIT 10; - user_id | rnk ----------+----- + user_id | rnk +--------------------------------------------------------------------- 2 | 24 2 | 23 2 | 22 @@ -274,8 +274,8 @@ WINDOW my_win AS (PARTITION BY user_id ORDER BY avg(event_type) DESC) ORDER BY 3 DESC, 2 DESC, 1 DESC; - user_id | rnk | avg_val_2 ----------+-----+-------------------- + user_id | rnk | avg_val_2 +--------------------------------------------------------------------- 1 | 1 | 3.3750000000000000 3 | 2 | 3.1666666666666667 5 | 1 | 2.6666666666666667 @@ -313,8 +313,8 @@ WINDOW ORDER BY cnt_with_filter_2 DESC NULLS LAST, filtered_count DESC NULLS LAST, datee DESC NULLS LAST, rnnk DESC, cnt2 DESC, cnt1 DESC, user_id DESC LIMIT 5; - count | cnt1 | cnt2 | datee | rnnk | 
filtered_count | cnt_with_filter_2 --------+------+------+--------------------------+------+------------------------+------------------- + count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2 +--------------------------------------------------------------------- 23 | 1 | 7 | Thu Nov 23 02:14:00 2017 | 6 | 0.00000000000000000000 | 72.7272727272727 10 | 1 | 3 | Wed Nov 22 23:01:00 2017 | 1 | 1.00000000000000000000 | 57.1428571428571 17 | 1 | 5 | Wed Nov 22 23:24:00 2017 | 8 | 3.0000000000000000 | 28.5714285714286 @@ -341,8 +341,8 @@ ORDER BY mx_time DESC, my_rank DESC, user_id DESC; - user_id | my_rank | avg | mx_time ----------+---------+------------------------+--------------------------------- + user_id | my_rank | avg | mx_time +--------------------------------------------------------------------- 6 | 1 | 3.0000000000000000 | Thu Nov 23 14:00:13.20013 2017 6 | 2 | 3.0000000000000000 | Thu Nov 23 11:16:13.106691 2017 6 | 1 | 3.0000000000000000 | Thu Nov 23 07:27:32.822068 2017 @@ -394,8 +394,8 @@ GROUP BY 1 ORDER BY 4 DESC,3 DESC,2 DESC ,1 DESC; - user_id | rank | dense_rank | cume_dist | percent_rank ----------+------+------------+-----------+-------------- + user_id | rank | dense_rank | cume_dist | percent_rank +--------------------------------------------------------------------- 6 | 1 | 1 | 1 | 0 5 | 1 | 1 | 1 | 0 4 | 1 | 1 | 1 | 0 @@ -416,9 +416,9 @@ WHERE user_id > 2 AND user_id < 6 ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ----------+---------+-------------------------------------------------------+----------------------------------------------------- - 3 | 0 | {0} | + user_id | value_1 | array_agg | array_agg +--------------------------------------------------------------------- + 3 | 0 | {0} | 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} @@ -501,8 +501,8 @@ WINDOW range_window_exclude as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ----------+---------+---------------------------------------+------------------------------------- + user_id | value_1 | array_agg | array_agg +--------------------------------------------------------------------- 3 | 0 | {0,1,1,1,1,1,1} | {1,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} @@ -586,8 +586,8 @@ WINDOW row_window_exclude as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ----------+---------+-----------+----------- + user_id | value_1 | array_agg | array_agg +--------------------------------------------------------------------- 3 | 0 | {0,1} | {1} 3 | 1 | {0,1,1} | {0,1} 3 | 1 | {1,1,1} | {1,1} @@ -669,8 +669,8 @@ ORDER BY 2 DESC, 3 DESC, 1 DESC LIMIT 5; - user_id | sum | event_type ----------+-----+------------ + user_id | sum | event_type +--------------------------------------------------------------------- 4 | 4 | 4 3 | 4 | 4 2 | 4 | 4 @@ -690,8 +690,8 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 5 | 3 4 | 2 (2 rows) @@ -707,8 +707,8 @@ ORDER BY 1, 2 DESC LIMIT 10; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 4 
| 2 5 | 3 (2 rows) @@ -724,8 +724,8 @@ ORDER BY (SUM(value_1) OVER (PARTITION BY user_id)) , 2 DESC, 1 LIMIT 10; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 5 | 3 4 | 2 (2 rows) @@ -741,8 +741,8 @@ GROUP BY 1 ORDER BY 3 DESC, 2 DESC, 1 DESC; - user_id | avg | avg ----------+--------------------+------------------------ + user_id | avg | avg +--------------------------------------------------------------------- 6 | 2.1000000000000000 | 6.0000000000000000 5 | 2.6538461538461538 | 5.0000000000000000 4 | 2.7391304347826087 | 4.0000000000000000 @@ -764,8 +764,8 @@ GROUP BY ORDER BY 3 DESC, 2 DESC, 1 DESC; $Q$); - coordinator_plan ------------------------------------------------------------------------------------- + coordinator_plan +--------------------------------------------------------------------- Sort Sort Key: remote_scan.avg_1 DESC, remote_scan.avg DESC, remote_scan.user_id DESC -> HashAggregate @@ -784,8 +784,8 @@ GROUP BY user_id, value_2 ORDER BY user_id, value_2; - user_id | ?column? | ?column? ----------+----------+-------------------- + user_id | ?column? | ?column? +--------------------------------------------------------------------- 1 | 5 | 3.2500000000000000 1 | 4 | 3.2500000000000000 1 | 6 | 3.2500000000000000 @@ -831,8 +831,8 @@ GROUP BY ORDER BY 2 DESC, 1 LIMIT 5; - user_id | ?column? | ?column? ----------+----------+-------------------- + user_id | ?column? | ?column? +--------------------------------------------------------------------- 4 | 28 | 3.5000000000000000 5 | 24 | 3.5000000000000000 2 | 17 | 3.5000000000000000 @@ -849,8 +849,8 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, value_2 DESC; - user_id | avg | rank ----------+------------------------+------ + user_id | avg | rank +--------------------------------------------------------------------- 1 | 3.6666666666666667 | 4 1 | 2.5000000000000000 | 3 1 | 3.0000000000000000 | 2 @@ -894,8 +894,8 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank ----------+------------------------+------ + user_id | avg | rank +--------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 1 | 3.0000000000000000 | 3 @@ -939,8 +939,8 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC -> HashAggregate @@ -949,7 +949,7 @@ ORDER BY user_id, avg(value_1) DESC; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> WindowAgg -> Sort Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) @@ -967,8 +967,8 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank ----------+------------------------+------ + user_id | avg | rank +--------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 1 | 3.0000000000000000 | 3 @@ -1014,8 +1014,8 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1025,7 +1025,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> WindowAgg -> Sort Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) @@ -1044,8 +1044,8 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1055,7 +1055,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> WindowAgg -> Sort Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) @@ -1074,8 +1074,8 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1085,7 +1085,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> WindowAgg -> Sort Sort Key: users_table.user_id, ((1 / (1 + sum(users_table.value_2)))) @@ -1104,8 +1104,8 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1115,7 +1115,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> WindowAgg -> Sort Sort Key: users_table.user_id, (sum(users_table.value_2)) diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index 939689800..8a6df96a5 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -6,8 +6,8 @@ WITH cte AS ( SELECT user_id, value_2 from users_table WHERE user_id IN (1, 2) ORDER BY 1,2 LIMIT 5 ) SELECT * FROM cte; - user_id | value_2 ----------+--------- + user_id | value_2 +--------------------------------------------------------------------- 1 | 0 1 | 2 1 | 3 @@ -29,8 +29,8 @@ ORDER BY value_2 LIMIT 5; - value_2 ---------- + value_2 +--------------------------------------------------------------------- 0 0 0 @@ -49,8 +49,8 @@ WITH cte_1 AS ( SELECT user_id FROM cte_1_2 ORDER BY user_id ) SELECT value_2 FROM users_table 
WHERE user_id IN (SELECT user_id FROM cte_1) ORDER BY value_2 LIMIT 1; - value_2 ---------- + value_2 +--------------------------------------------------------------------- 0 (1 row) @@ -71,8 +71,8 @@ ORDER BY 1, 2 LIMIT 5; - max | value_2 ------+--------- + max | value_2 +--------------------------------------------------------------------- 5 | 5 6 | 5 6 | 5 @@ -87,8 +87,8 @@ SELECT user_id FROM ( ) SELECT user_id FROM cte WHERE value_2 > 0 ) a ORDER BY 1 LIMIT 3; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 2 2 @@ -100,7 +100,7 @@ WITH cte AS ( ) SELECT (SELECT * FROM cte); ERROR: more than one row returned by a subquery used as an expression -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx WITH cte_basic AS ( SELECT user_id FROM users_table WHERE user_id = 1 ) @@ -115,8 +115,8 @@ WITH cte AS ( SELECT user_id FROM users_table WHERE value_2 IN (1, 2) ) SELECT (SELECT * FROM cte ORDER BY 1 LIMIT 1); - user_id ---------- + user_id +--------------------------------------------------------------------- 1 (1 row) @@ -134,8 +134,8 @@ GROUP BY ORDER BY 1, 2 LIMIT 5; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 1 | 7 2 | 18 3 | 17 @@ -160,8 +160,8 @@ HAVING ORDER BY 1, 2 LIMIT 5; - twice | min --------+----- + twice | min +--------------------------------------------------------------------- 6 | 3 8 | 4 10 | 5 @@ -182,8 +182,8 @@ ORDER BY user_id LIMIT 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 (1 row) @@ -201,8 +201,8 @@ ORDER BY user_id LIMIT 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 (1 row) @@ -216,8 +216,8 @@ FROM (SELECT min(user_id) AS user_id FROM top_users) top_users JOIN users_table USING (user_id); - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -235,8 +235,8 @@ WITH top_ten(id, val1) AS ( SELECT user_id, value_1 FROM users_table ORDER BY value_1 DESC, user_id DESC LIMIT 10 ) SELECT * FROM top_ten; - id | val1 -----+------ + id | val1 +--------------------------------------------------------------------- 6 | 5 6 | 5 5 | 5 @@ -255,15 +255,13 @@ WITH top_ten(id, val1) AS ( ) SELECT * FROM top_ten ORDER BY user_id DESC; ERROR: column "user_id" does not exist -LINE 4: SELECT * FROM top_ten ORDER BY user_id DESC; - ^ -- verify original name is used if alias is missing WITH top_ten(id) AS ( SELECT user_id, value_1 FROM users_table ORDER BY value_1 DESC, user_id DESC LIMIT 10 ) SELECT * FROM top_ten ORDER BY value_1 DESC; - id | value_1 -----+--------- + id | value_1 +--------------------------------------------------------------------- 6 | 5 6 | 5 5 | 5 @@ -281,8 +279,8 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( SELECT user_id, value_1, value_1*2, value_1 + value_2 FROM users_table ORDER BY value_1 DESC, user_id DESC, value_2 DESC LIMIT 10 ) SELECT * FROM top_ten ORDER BY id DESC, val_mul DESC, (val_sum + 1) DESC; - id | val | val_mul | val_sum -----+-----+---------+--------- + id | val | val_mul | val_sum +--------------------------------------------------------------------- 6 | 5 | 10 | 7 6 | 5 | 10 | 5 5 | 5 | 10 | 10 @@ -300,8 +298,8 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( SELECT user_id, value_1, value_1*2, value_1 + value_2 FROM users_table ORDER BY value_1 DESC, value_2 DESC, user_id DESC 
LIMIT 10 ) SELECT id, val, id * val, val_sum * 2, val_sum + val_sum FROM top_ten ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - id | val | ?column? | ?column? | ?column? -----+-----+----------+----------+---------- + id | val | ?column? | ?column? | ?column? +--------------------------------------------------------------------- 6 | 5 | 30 | 14 | 14 6 | 5 | 30 | 10 | 10 5 | 5 | 25 | 20 | 20 @@ -321,8 +319,8 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( SELECT id, count(*), avg(val), max(val_mul), min(val_sum) FROM top_ten GROUP BY id ORDER BY 2 DESC, 1 DESC; - id | count | avg | max | min -----+-------+--------------------+-----+----- + id | count | avg | max | min +--------------------------------------------------------------------- 5 | 26 | 2.6538461538461538 | 10 | 2 4 | 23 | 2.7391304347826087 | 10 | 0 2 | 18 | 2.3333333333333333 | 8 | 1 @@ -345,8 +343,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -368,8 +366,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -391,8 +389,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -414,8 +412,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -440,8 +438,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 6 6 6 @@ -484,8 +482,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 1 1 @@ -522,8 +520,8 @@ ORDER BY user_id LIMIT 5; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 1 1 @@ -547,8 +545,8 @@ ORDER BY 2 DESC, 1 LIMIT 5; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 3 | 651 2 | 552 4 | 544 @@ -567,18 +565,18 @@ ORDER BY 1,2,3,4,5,6 LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | - 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | - 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | - 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | - 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | - 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | - 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | - 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | + 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | + 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | + 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | + 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | + 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | + 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | + 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | (10 rows) SELECT * FROM ( @@ -591,18 +589,18 @@ ORDER BY 1,2,3,4,5,6 
LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | - 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | - 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | - 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | - 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | - 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | - 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | - 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | + 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | + 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | + 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | + 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | + 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | + 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | + 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | (10 rows) -- SELECT * FROM (SELECT * FROM cte UNION SELECT * FROM cte) a; should work @@ -617,13 +615,13 @@ ORDER BY 1,2,3,4,5,6 LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | (5 rows) WITH cte AS ( @@ -633,18 +631,18 @@ cte_2 AS ( SELECT * FROM users_table WHERE user_id IN (3, 4) ORDER BY 1,2,3 LIMIT 5 ) SELECT * FROM cte UNION ALL SELECT * FROM cte_2; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | - 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | - 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | + 3 | Wed Nov 22 
20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | + 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | (10 rows) -- basic recursive CTE which should all error out @@ -735,8 +733,8 @@ WITH cte_user AS ( SELECT basic_view.user_id,events_table.value_2 FROM basic_view join events_table on (basic_view.user_id = events_table.user_id) ) SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2; - user_id | sum ----------+------ + user_id | sum +--------------------------------------------------------------------- 1 | 294 2 | 1026 3 | 782 @@ -746,8 +744,8 @@ SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2; (6 rows) SELECT * FROM cte_view ORDER BY 1, 2 LIMIT 5; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- 1 | 5 2 | 4 3 | 5 @@ -760,8 +758,8 @@ WITH cte_user_with_view AS SELECT * FROM cte_view WHERE user_id < 3 ) SELECT user_id, value_1 FROM cte_user_with_view ORDER BY 1, 2 LIMIT 10 OFFSET 2; - user_id | value_1 ----------+--------- + user_id | value_1 +--------------------------------------------------------------------- (0 rows) DROP VIEW basic_view; diff --git a/src/test/regress/expected/with_dml.out b/src/test/regress/expected/with_dml.out index 89543cef2..3e33fe52f 100644 --- a/src/test/regress/expected/with_dml.out +++ b/src/test/regress/expected/with_dml.out @@ -2,23 +2,23 @@ CREATE SCHEMA with_dml; SET search_path TO with_dml, public; CREATE TABLE with_dml.distributed_table (tenant_id text PRIMARY KEY, dept int); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE with_dml.second_distributed_table (tenant_id text, dept int); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE with_dml.reference_table (id text, name text); SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_table SELECT i::text, i % 10 FROM generate_series (0, 100) i; @@ -30,15 +30,15 @@ WITH ids_to_delete AS ( SELECT tenant_id FROM distributed_table WHERE dept = 1 ) DELETE FROM reference_table WHERE id IN (SELECT tenant_id FROM ids_to_delete); -DEBUG: generating subplan 4_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 1) -DEBUG: Plan 4 query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete)) +DEBUG: generating subplan XXX_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 1) +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete)) -- update the name of the users whose dept is 2 WITH ids_to_update AS ( SELECT tenant_id FROM distributed_table WHERE dept = 2 ) UPDATE reference_table SET name = 'new_' || name WHERE id IN (SELECT tenant_id FROM ids_to_update); -DEBUG: generating subplan 6_1 for CTE ids_to_update: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 2) -DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE with_dml.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) name) WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_update.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_update)) +DEBUG: generating subplan XXX_1 for CTE ids_to_update: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_dml.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) name) WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_update.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_update)) -- now the CTE is also modifying WITH ids_deleted_3 AS ( @@ -49,10 +49,10 @@ ids_deleted_4 AS DELETE FROM distributed_table WHERE dept = 4 RETURNING tenant_id ) DELETE FROM reference_table WHERE id IN (SELECT * FROM ids_deleted_3 UNION SELECT * FROM ids_deleted_4); -DEBUG: generating subplan 8_1 for CTE ids_deleted_3: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) RETURNING tenant_id -DEBUG: generating subplan 8_2 for CTE ids_deleted_4: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 4) RETURNING tenant_id -DEBUG: generating subplan 8_3 for subquery SELECT ids_deleted_3.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_3 UNION SELECT ids_deleted_4.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_4 -DEBUG: Plan 8 query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text))) +DEBUG: generating subplan XXX_1 for CTE ids_deleted_3: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) RETURNING tenant_id +DEBUG: generating subplan XXX_2 for CTE ids_deleted_4: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 4) RETURNING tenant_id +DEBUG: generating subplan XXX_3 for subquery SELECT ids_deleted_3.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_3 UNION SELECT ids_deleted_4.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_4 +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE 
(id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text))) -- now the final UPDATE command is pushdownable WITH ids_to_delete AS ( @@ -68,8 +68,8 @@ WHERE some_tenants.tenant_id = ids_to_delete.tenant_id AND distributed_table.tenant_id = some_tenants.tenant_id AND EXISTS (SELECT * FROM ids_to_delete); -DEBUG: generating subplan 12_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 5) -DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_dml.distributed_table SET dept = (distributed_table.dept OPERATOR(pg_catalog.+) 1) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete, (SELECT distributed_table_1.tenant_id FROM with_dml.distributed_table distributed_table_1 WHERE ((distributed_table_1.tenant_id)::integer OPERATOR(pg_catalog.<) 60)) some_tenants WHERE ((some_tenants.tenant_id OPERATOR(pg_catalog.=) ids_to_delete.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) some_tenants.tenant_id) AND (EXISTS (SELECT ids_to_delete_1.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete_1))) +DEBUG: generating subplan XXX_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 5) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_dml.distributed_table SET dept = (distributed_table.dept OPERATOR(pg_catalog.+) 1) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete, (SELECT distributed_table_1.tenant_id FROM with_dml.distributed_table distributed_table_1 WHERE ((distributed_table_1.tenant_id)::integer OPERATOR(pg_catalog.<) 60)) some_tenants WHERE ((some_tenants.tenant_id OPERATOR(pg_catalog.=) ids_to_delete.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) some_tenants.tenant_id) AND (EXISTS (SELECT ids_to_delete_1.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete_1))) -- this query errors out since we've some hard -- errors in the INSERT ... SELECT pushdown -- which prevents to fallback to recursive planning @@ -97,9 +97,9 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 16_1 for CTE ids_to_insert: SELECT (((tenant_id)::integer OPERATOR(pg_catalog.*) 100))::text AS tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.>) 7) -DEBUG: generating subplan 16_2 for subquery SELECT DISTINCT ids_to_insert.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_insert, with_dml.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) ids_to_insert.tenant_id) -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE ids_to_insert: SELECT (((tenant_id)::integer OPERATOR(pg_catalog.*) 100))::text AS tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.>) 7) +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT ids_to_insert.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_insert, with_dml.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) ids_to_insert.tenant_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) citus_insert_select_subquery -- not a very meaningful query -- but has two modifying CTEs along with another -- modify statement @@ -132,10 +132,10 @@ INSERT INTO second_distributed_table FROM copy_to_other_table; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... 
SELECT results on coordinator -DEBUG: generating subplan 20_1 for CTE copy_to_other_table: INSERT INTO with_dml.distributed_table (tenant_id, dept) SELECT tenant_id, dept FROM with_dml.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) ON CONFLICT(tenant_id) DO UPDATE SET dept = 4 RETURNING distributed_table.tenant_id, distributed_table.dept -DEBUG: generating subplan 20_2 for CTE main_table_deleted: DELETE FROM with_dml.distributed_table WHERE ((dept OPERATOR(pg_catalog.<) 10) AND (NOT (EXISTS (SELECT 1 FROM with_dml.second_distributed_table WHERE ((second_distributed_table.dept OPERATOR(pg_catalog.=) 1) AND (second_distributed_table.tenant_id OPERATOR(pg_catalog.=) distributed_table.tenant_id)))))) RETURNING tenant_id, dept -DEBUG: generating subplan 20_3 for subquery SELECT main_table_deleted.tenant_id, main_table_deleted.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) main_table_deleted EXCEPT SELECT copy_to_other_table.tenant_id, copy_to_other_table.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) copy_to_other_table -DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT tenant_id, dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('20_3'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_1 for CTE copy_to_other_table: INSERT INTO with_dml.distributed_table (tenant_id, dept) SELECT tenant_id, dept FROM with_dml.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) ON CONFLICT(tenant_id) DO UPDATE SET dept = 4 RETURNING distributed_table.tenant_id, distributed_table.dept +DEBUG: generating subplan XXX_2 for CTE main_table_deleted: DELETE FROM with_dml.distributed_table WHERE ((dept OPERATOR(pg_catalog.<) 10) AND (NOT (EXISTS (SELECT 1 FROM with_dml.second_distributed_table WHERE ((second_distributed_table.dept OPERATOR(pg_catalog.=) 1) AND (second_distributed_table.tenant_id OPERATOR(pg_catalog.=) distributed_table.tenant_id)))))) RETURNING tenant_id, dept +DEBUG: generating subplan XXX_3 for subquery SELECT main_table_deleted.tenant_id, main_table_deleted.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) main_table_deleted EXCEPT SELECT copy_to_other_table.tenant_id, copy_to_other_table.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) copy_to_other_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT tenant_id, dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) citus_insert_select_subquery SET citus.force_max_query_parallelization TO off; -- CTE inside the UPDATE statement UPDATE @@ -145,23 +145,23 @@ SET dept = SELECT DISTINCT tenant_id::int FROM distributed_table ) select * from vals where tenant_id = 8 ) WHERE dept = 8; -DEBUG: generating subplan 24_1 
for CTE vals: SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table -DEBUG: Plan 24 query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT vals.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) vals WHERE (vals.tenant_id OPERATOR(pg_catalog.=) 8)) WHERE (dept OPERATOR(pg_catalog.=) 8) +DEBUG: generating subplan XXX_1 for CTE vals: SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT vals.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) vals WHERE (vals.tenant_id OPERATOR(pg_catalog.=) 8)) WHERE (dept OPERATOR(pg_catalog.=) 8) -- Subquery inside the UPDATE statement UPDATE second_distributed_table SET dept = (SELECT DISTINCT tenant_id::int FROM distributed_table WHERE tenant_id = '9') WHERE dept = 8; -DEBUG: generating subplan 26_1 for subquery SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) '9'::text) -DEBUG: Plan 26 query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT intermediate_result.tenant_id FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) WHERE (dept OPERATOR(pg_catalog.=) 8) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) '9'::text) +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) WHERE (dept OPERATOR(pg_catalog.=) 8) -- delete all remaining tenants WITH ids_to_delete AS ( SELECT tenant_id FROM distributed_table ) DELETE FROM distributed_table WHERE tenant_id = ANY(SELECT tenant_id FROM ids_to_delete); -DEBUG: generating subplan 28_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table -DEBUG: Plan 28 query after replacing subqueries and CTEs: DELETE FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete)) +DEBUG: generating subplan XXX_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete)) WITH ids_to_delete AS ( SELECT id FROM reference_table ) diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out index 5e7955c94..5c1fb8317 100644 --- a/src/test/regress/expected/with_executors.out +++ b/src/test/regress/expected/with_executors.out @@ -3,7 +3,7 @@ CREATE SCHEMA 
with_executors; SET search_path TO with_executors, public; SET citus.enable_repartition_joins TO on; CREATE TABLE with_executors.local_table (id int); -INSERT INTO local_table VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); +INSERT INTO local_table VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); -- CTEs should be able to use local queries WITH cte AS ( WITH local_cte AS ( @@ -15,8 +15,8 @@ WITH cte AS ( SELECT * FROM local_cte join dist_cte on dist_cte.user_id=local_cte.id ) SELECT count(*) FROM cte; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -33,8 +33,8 @@ WITH cte AS ( SELECT * FROM merger_cte WHERE user_id IN (1, 2, 3) ) SELECT * FROM cte ORDER BY 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 1 2 3 @@ -49,32 +49,32 @@ WITH cte AS ( ) SELECT local_cte.id as id_1, local_cte_2.id as id_2 FROM local_cte,local_cte_2 ) -SELECT - * -FROM - cte -join - users_table -on - cte.id_1 = users_table.user_id -WHERE +SELECT + * +FROM + cte +join + users_table +on + cte.id_1 = users_table.user_id +WHERE cte.id_1 IN (3, 4, 5) ORDER BY 1,2,3,4,5,6,7 LIMIT 10; - id_1 | id_2 | user_id | time | value_1 | value_2 | value_3 | value_4 -------+------+---------+---------------------------------+---------+---------+---------+--------- - 3 | 6 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | - 3 | 6 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | 6 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 3 | 6 | 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | - 3 | 6 | 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | - 3 | 6 | 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | - 3 | 6 | 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | - 3 | 6 | 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | - 3 | 6 | 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | - 3 | 6 | 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | + id_1 | id_2 | user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 3 | 6 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | + 3 | 6 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | 6 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 3 | 6 | 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | + 3 | 6 | 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + 3 | 6 | 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | + 3 | 6 | 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | + 3 | 6 | 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | + 3 | 6 | 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | + 3 | 6 | 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | (10 rows) -- CTEs should be able to use router queries @@ -85,14 +85,14 @@ WITH cte AS ( router_cte_2 AS ( SELECT user_id, event_type, value_2 FROM events_table WHERE user_id = 1 ) - SELECT - router_cte.user_id as uid, event_type - FROM + SELECT + router_cte.user_id as uid, event_type + FROM router_cte, router_cte_2 ) SELECT * FROM cte ORDER BY 2 LIMIT 5; - uid | event_type ------+------------ + uid | event_type +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -105,18 +105,18 @@ WITH real_time_cte AS ( SELECT * FROM users_table WHERE value_2 IN (1, 2, 3) ) SELECT * FROM real_time_cte ORDER BY 1, 2, 3, 4, 5, 6 LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 
03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) -- router & real-time together @@ -127,16 +127,16 @@ WITH cte AS ( real_time AS ( SELECT user_id, event_type, value_2 FROM events_table ) - SELECT - router_cte.user_id as uid, event_type - FROM - router_cte, real_time - WHERE + SELECT + router_cte.user_id as uid, event_type + FROM + router_cte, real_time + WHERE router_cte.user_id=real_time.user_id ) SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5; - uid | event_type ------+------------ + uid | event_type +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -147,47 +147,47 @@ SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5; -- CTEs should be able to use task-tracker queries WITH cte AS ( WITH task_tracker_1 AS ( - SELECT - users_table.user_id as uid_1, users_table.value_2 - FROM - users_table + SELECT + users_table.user_id as uid_1, users_table.value_2 + FROM + users_table JOIN - events_table - ON + events_table + ON users_table.value_2=events_table.value_2 ), task_tracker_2 AS ( - SELECT - users_table.user_id as uid_2, users_table.value_3 - FROM - users_table - JOIN - events_table - ON + SELECT + users_table.user_id as uid_2, users_table.value_3 + FROM + users_table + JOIN + events_table + ON users_table.value_3=events_table.value_3 ) - SELECT + SELECT uid_1, uid_2, value_2, value_3 - FROM + FROM task_tracker_1 JOIN task_tracker_2 - ON + ON value_2 = value_3 ) -SELECT - uid_1, uid_2, cte.value_2, cte.value_3 -FROM - cte -JOIN +SELECT + uid_1, uid_2, cte.value_2, cte.value_3 +FROM + cte +JOIN events_table ON cte.value_2 = events_table.event_type -ORDER BY - 1, 2, 3, 4 +ORDER BY + 1, 2, 3, 4 LIMIT 10; - uid_1 | uid_2 | value_2 | value_3 --------+-------+---------+--------- + uid_1 | uid_2 | value_2 | value_3 +--------------------------------------------------------------------- 1 | 1 | 0 | 0 1 | 1 | 0 | 0 1 | 1 | 0 | 0 @@ -203,13 +203,13 @@ LIMIT 10; -- All combined WITH cte AS ( WITH task_tracker AS ( - SELECT + SELECT users_table.user_id as uid_1, users_table.value_2 as val_2 - FROM - users_table + FROM + users_table JOIN - events_table - ON + events_table + ON users_table.value_2=events_table.value_2 ), real_time AS ( @@ -225,33 +225,33 @@ WITH cte AS ( SELECT uid_1, time, value_3 FROM task_tracker JOIN real_time ON val_2=value_3 ), join_last_two AS ( - SELECT - router_exec.user_id, local_table.id - FROM - router_exec - JOIN - local_table - ON + SELECT + 
router_exec.user_id, local_table.id + FROM + router_exec + JOIN + local_table + ON router_exec.user_id=local_table.id ) SELECT * FROM join_first_two JOIN join_last_two ON id = value_3 ORDER BY 1,2,3,4,5 LIMIT 10 ) SELECT DISTINCT uid_1, time, value_3 FROM cte ORDER BY 1, 2, 3 LIMIT 20; - uid_1 | time | value_3 --------+---------------------------------+--------- + uid_1 | time | value_3 +--------------------------------------------------------------------- 2 | Wed Nov 22 18:19:49.944985 2017 | 1 (1 row) -- All combined with outer join WITH cte AS ( WITH task_tracker AS ( - SELECT + SELECT users_table.user_id as uid_1, users_table.value_2 as val_2 - FROM - users_table + FROM + users_table JOIN - events_table - ON + events_table + ON users_table.value_2=events_table.value_2 ), real_time AS ( @@ -267,20 +267,20 @@ WITH cte AS ( SELECT uid_1, time, value_3 FROM task_tracker JOIN real_time ON val_2=value_3 ), join_last_two AS ( - SELECT - router_exec.user_id, local_table.id - FROM - router_exec - JOIN - local_table - ON + SELECT + router_exec.user_id, local_table.id + FROM + router_exec + JOIN + local_table + ON router_exec.user_id=local_table.id ) SELECT uid_1, value_3 as val_3 FROM join_first_two JOIN join_last_two ON id = value_3 ORDER BY 1,2 LIMIT 10 ) SELECT DISTINCT uid_1, val_3 FROM cte join events_table on cte.val_3=events_table.event_type ORDER BY 1, 2; - uid_1 | val_3 --------+------- + uid_1 | val_3 +--------------------------------------------------------------------- 2 | 1 (1 row) @@ -312,8 +312,8 @@ FROM cte, users_table WHERE cte.count=user_id and user_id=5; - row_number | count -------------+------- + row_number | count +--------------------------------------------------------------------- 1 | 0 (1 row) @@ -334,15 +334,15 @@ WITH cte AS ( SELECT count(*) FROM users_table join cte_merge on id=user_id ) SELECT count(*) FROM cte, users_table where cte.count=user_id; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SET citus.task_executor_type='task-tracker'; -- CTEs shouldn't be able to terminate a task-tracker query WITH cte_1 AS ( - SELECT + SELECT u_table.user_id as u_id, e_table.event_type FROM users_table as u_table diff --git a/src/test/regress/expected/with_join.out b/src/test/regress/expected/with_join.out index 3e7f6ff69..12d855c3e 100644 --- a/src/test/regress/expected/with_join.out +++ b/src/test/regress/expected/with_join.out @@ -3,9 +3,9 @@ SET search_path TO with_join, public; SET citus.next_shard_id TO 1501000; CREATE TABLE with_join.reference_table(user_id int); SELECT create_reference_table('with_join.reference_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO reference_table VALUES (6), (7); @@ -36,8 +36,8 @@ GROUP BY 1 ORDER BY 2 DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 3 | 30168 4 | 27768 2 | 25327 @@ -72,8 +72,8 @@ GROUP BY 1 ORDER BY 2 DESC, 1; - user_id | count ----------+------- + user_id | count +--------------------------------------------------------------------- 2 | 67507 4 | 23040 3 | 14580 @@ -142,8 +142,8 @@ ORDER BY 1 DESC LIMIT 5; - uid ------ + uid +--------------------------------------------------------------------- 6 5 4 @@ -181,8 +181,8 @@ ORDER BY 1,2,3 LIMIT 5; - user_id | time | event_type ----------+---------------------------------+------------ + user_id | time | 
event_type +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 1 @@ -204,8 +204,8 @@ ORDER BY 1,2,3 LIMIT 5; - user_id | time | event_type ----------+---------------------------------+------------ + user_id | time | event_type +--------------------------------------------------------------------- 1 | Thu Nov 23 09:26:42.145043 2017 | 0 1 | Thu Nov 23 09:26:42.145043 2017 | 0 1 | Thu Nov 23 09:26:42.145043 2017 | 1 @@ -259,10 +259,10 @@ ORDER BY user_id LIMIT 5; - row_number | user_id -------------+--------- + row_number | user_id +--------------------------------------------------------------------- 2 | 6 - 1 | + 1 | (2 rows) -- some more tests for more complex outer-joins @@ -272,27 +272,27 @@ CREATE TABLE distributed_2 (col1 int, col2 int, distrib_col int); CREATE TABLE reference_1 (col1 int, col2 int); CREATE TABLE reference_2(col1 int, col2 int); SELECT create_distributed_table('distributed_1','distrib_col'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_distributed_table('distributed_2','distrib_col'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('reference_1'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) SELECT create_reference_table('reference_2'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) INSERT INTO distributed_1 SELECT i, i, i FROM generate_series(0,100) i; @@ -303,8 +303,8 @@ select count(*) from distributed_1 AS d1 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join (select distrib_col,count(*) from distributed_2 group by distrib_col) d2 ON d2.distrib_col=d1.distrib_col; - count -------- + count +--------------------------------------------------------------------- 1734 (1 row) @@ -313,8 +313,8 @@ select count(*) from distributed_1 AS d1 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join d2 ON d2.distrib_col=d1.distrib_col; - count -------- + count +--------------------------------------------------------------------- 1734 (1 row) @@ -323,8 +323,8 @@ select count(*) from distributed_1 AS d1 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join d2 ON d2.distrib_col=d1.distrib_col; - count -------- + count +--------------------------------------------------------------------- 87584 (1 row) @@ -333,8 +333,8 @@ select count(*) from distributed_1 AS d1 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join cte_1 ON cte_1.col1=d1.distrib_col; - count -------- + count +--------------------------------------------------------------------- 86181 (1 row) diff --git a/src/test/regress/expected/with_modifying.out b/src/test/regress/expected/with_modifying.out index 41306b7d7..cb2279724 100644 --- a/src/test/regress/expected/with_modifying.out +++ b/src/test/regress/expected/with_modifying.out @@ -4,31 +4,31 @@ CREATE SCHEMA with_modifying; SET search_path TO 
with_modifying, public; CREATE TABLE with_modifying.modify_table (id int, val int); SELECT create_distributed_table('modify_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE with_modifying.users_table (LIKE public.users_table INCLUDING ALL); SELECT create_distributed_table('with_modifying.users_table', 'user_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO with_modifying.users_table SELECT * FROM public.users_table; CREATE TABLE with_modifying.summary_table (id int, counter int); SELECT create_distributed_table('summary_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE with_modifying.anchor_table (id int); SELECT create_reference_table('anchor_table'); - create_reference_table ------------------------- - + create_reference_table +--------------------------------------------------------------------- + (1 row) -- basic insert query in CTE @@ -41,11 +41,11 @@ FROM basic_insert ORDER BY user_id; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 1 | | | | | - 2 | | | | | - 3 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | | | | | + 2 | | | | | + 3 | | | | | (3 rows) -- single-shard UPDATE in CTE @@ -60,17 +60,17 @@ ORDER BY user_id, time LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | - 1 | | | | 41 | - 1 | | | | 41 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | + 1 | | | | 41 | + 1 | | | | 41 | (9 rows) -- multi-shard UPDATE in CTE @@ -85,18 +85,18 @@ ORDER BY user_id, time LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 42 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 42 | - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 42 | - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | - 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 42 | - 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 42 | - 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 42 | - 4 | Thu Nov 23 07:09:37.382372 2017 | 4 | 1 | 42 | - 4 | Thu Nov 23 
08:38:45.877401 2017 | 4 | 1 | 42 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 42 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 42 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 42 | + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | + 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | + 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 42 | + 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 42 | + 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 42 | + 4 | Thu Nov 23 07:09:37.382372 2017 | 4 | 1 | 42 | + 4 | Thu Nov 23 08:38:45.877401 2017 | 4 | 1 | 42 | (10 rows) -- single-shard DELETE in CTE @@ -111,18 +111,18 @@ ORDER BY user_id, time LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 42 | - 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 42 | - 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | - 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | - 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | - 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | - 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | - 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 42 | + 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 42 | + 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | + 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | + 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | + 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | + 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | + 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | (10 rows) -- multi-shard DELETE in CTE @@ -137,17 +137,17 @@ ORDER BY user_id, time LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | - 1 | | | | 41 | - 1 | | | | 41 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | + 1 | | | | 41 | + 1 | | | | 41 | (9 rows) -- INSERT...SELECT query in CTE @@ -162,18 +162,18 @@ ORDER BY user_id, time LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 
| - 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | - 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | - 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | - 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | - 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | - 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | - 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | + 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | + 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | + 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | + 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | + 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | + 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | (10 rows) -- CTEs prior to INSERT...SELECT via the coordinator should work @@ -232,8 +232,8 @@ WITH raw_data AS ( ) INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT * FROM summary_table ORDER BY id; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 2 | 20 3 | 38 4 | 24 @@ -241,8 +241,8 @@ SELECT * FROM summary_table ORDER BY id; (4 rows) SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -252,8 +252,8 @@ WITH raw_data AS ( ) INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 1 2 | 1 2 | 20 @@ -264,8 +264,8 @@ SELECT * FROM summary_table ORDER BY id, counter; (7 rows) SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -273,8 +273,8 @@ WITH insert_reference AS ( INSERT INTO anchor_table VALUES (1), (2) RETURNING * ) SELECT id FROM insert_reference ORDER BY id; - id ----- + id +--------------------------------------------------------------------- 1 2 (2 rows) @@ -298,14 +298,14 @@ SELECT id, SUM(counter) FROM ( GROUP BY id; SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 1 2 | 21 3 | 39 @@ -321,14 +321,14 @@ raw_data AS ( ) INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 21 @@ -343,8 +343,8 @@ WITH summary_data AS ( ) INSERT INTO summary_table SELECT id, SUM(counter) AS counter FROM summary_data GROUP BY id; SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | 
counter +--------------------------------------------------------------------- 1 | 2 2 | 21 3 | 39 @@ -353,16 +353,16 @@ SELECT * FROM summary_table ORDER BY id, counter; (5 rows) SELECT * FROM modify_table ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 (3 rows) SELECT * FROM anchor_table ORDER BY id; - id ----- + id +--------------------------------------------------------------------- 1 2 (2 rows) @@ -379,42 +379,42 @@ INSERT INTO modify_table VALUES (21, 1), (22, 2), (23, 3); -- read ids from the same table WITH distinct_ids AS ( SELECT DISTINCT id FROM modify_table -), +), update_data AS ( - UPDATE modify_table SET val = 100 WHERE id > 10 AND + UPDATE modify_table SET val = 100 WHERE id > 10 AND id IN (SELECT * FROM distinct_ids) RETURNING * ) SELECT count(*) FROM update_data; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) -- read ids from a different table WITH distinct_ids AS ( SELECT DISTINCT id FROM summary_table -), +), update_data AS ( UPDATE modify_table SET val = 100 WHERE id > 10 AND id IN (SELECT * FROM distinct_ids) RETURNING * ) SELECT count(*) FROM update_data; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) -- test update with generate series -UPDATE modify_table SET val = 200 WHERE id > 10 AND +UPDATE modify_table SET val = 200 WHERE id > 10 AND id IN (SELECT 2*s FROM generate_series(1,20) s); -- test update with generate series in CTE WITH update_data AS ( - UPDATE modify_table SET val = 300 WHERE id > 10 AND + UPDATE modify_table SET val = 300 WHERE id > 10 AND id IN (SELECT 3*s FROM generate_series(1,20) s) RETURNING * ) SELECT COUNT(*) FROM update_data; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) @@ -422,8 +422,8 @@ WITH delete_rows AS ( DELETE FROM modify_table WHERE id > 10 RETURNING * ) SELECT * FROM delete_rows ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 21 | 300 22 | 200 23 | 100 @@ -433,8 +433,8 @@ WITH delete_rows AS ( DELETE FROM summary_table WHERE id > 10 RETURNING * ) SELECT * FROM delete_rows ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 11 | 1 12 | 1 13 | 1 @@ -450,15 +450,15 @@ WITH insert_reference AS ( INSERT INTO anchor_table VALUES (3), (4) RETURNING * ) SELECT id FROM insert_reference ORDER BY id; - id ----- + id +--------------------------------------------------------------------- 3 4 (2 rows) SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 1 1 | 2 2 | 1 @@ -470,13 +470,13 @@ SELECT * FROM summary_table ORDER BY id, counter; (8 rows) SELECT * FROM modify_table ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- (0 rows) SELECT * FROM anchor_table ORDER BY id; - id ----- + id +--------------------------------------------------------------------- 1 2 3 @@ -485,8 +485,8 @@ SELECT * FROM anchor_table ORDER BY id; ROLLBACK; SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 2 2 | 21 3 | 39 @@ -495,16 
+495,16 @@ SELECT * FROM summary_table ORDER BY id, counter; (5 rows) SELECT * FROM modify_table ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 (3 rows) SELECT * FROM anchor_table ORDER BY id; - id ----- + id +--------------------------------------------------------------------- 1 2 (2 rows) @@ -514,8 +514,8 @@ WITH deleted_rows AS ( DELETE FROM modify_table WHERE id IN (SELECT id FROM modify_table WHERE id = 1) RETURNING * ) SELECT * FROM deleted_rows; - id | val -----+----- + id | val +--------------------------------------------------------------------- 1 | 2 (1 row) @@ -523,8 +523,8 @@ WITH deleted_rows AS ( DELETE FROM modify_table WHERE id IN (SELECT id FROM modify_table WHERE val = 4) RETURNING * ) SELECT * FROM deleted_rows; - id | val -----+----- + id | val +--------------------------------------------------------------------- 2 | 4 (1 row) @@ -535,16 +535,16 @@ deleted_rows AS ( DELETE FROM modify_table WHERE id IN (SELECT id FROM select_rows) RETURNING * ) SELECT * FROM deleted_rows; - id | val -----+----- + id | val +--------------------------------------------------------------------- (0 rows) WITH deleted_rows AS ( DELETE FROM modify_table WHERE val IN (SELECT val FROM modify_table WHERE id = 3) RETURNING * ) SELECT * FROM deleted_rows; - id | val -----+----- + id | val +--------------------------------------------------------------------- 3 | 6 (1 row) @@ -555,8 +555,8 @@ deleted_rows AS ( DELETE FROM modify_table WHERE val IN (SELECT val FROM select_rows) RETURNING * ) SELECT * FROM deleted_rows; - id | val -----+----- + id | val +--------------------------------------------------------------------- (0 rows) WITH deleted_rows AS ( @@ -596,8 +596,8 @@ raw_data AS ( DELETE FROM modify_table WHERE id = 1 AND val IN (SELECT val FROM select_data) RETURNING * ) SELECT COUNT(*) FROM raw_data; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -609,8 +609,8 @@ raw_data AS ( DELETE FROM modify_table WHERE id IN (SELECT id FROM select_data WHERE val > 5) RETURNING id, val ) SELECT * FROM raw_data ORDER BY val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 1 | 2 1 | 6 (2 rows) @@ -622,14 +622,14 @@ raw_data AS ( UPDATE modify_table SET val = 0 WHERE id IN (SELECT id FROM select_data WHERE val < 5) RETURNING id, val ) SELECT * FROM raw_data ORDER BY val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 2 | 0 (1 row) SELECT * FROM modify_table ORDER BY id, val; - id | val -----+----- + id | val +--------------------------------------------------------------------- 2 | 0 3 | 5 (2 rows) @@ -639,14 +639,14 @@ WITH select_data AS ( SELECT * FROM modify_table ), raw_data AS ( - UPDATE modify_table SET val = 0 WHERE - id IN (SELECT id FROM select_data) AND + UPDATE modify_table SET val = 0 WHERE + id IN (SELECT id FROM select_data) AND val IN (SELECT counter FROM summary_table) RETURNING id, val ) SELECT * FROM raw_data ORDER BY val; - id | val -----+----- + id | val +--------------------------------------------------------------------- (0 rows) -- Test with replication factor 2 @@ -654,29 +654,29 @@ SET citus.shard_replication_factor to 2; DROP TABLE modify_table; CREATE TABLE with_modifying.modify_table (id int, val int); SELECT create_distributed_table('modify_table', 'id'); - create_distributed_table 
--------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO with_modifying.modify_table SELECT user_id, value_1 FROM public.users_table; DROP TABLE summary_table; CREATE TABLE with_modifying.summary_table (id int, counter int); SELECT create_distributed_table('summary_table', 'id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 107 (1 row) SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- (0 rows) WITH raw_data AS ( @@ -684,14 +684,14 @@ WITH raw_data AS ( ) INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT COUNT(*) FROM modify_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM summary_table ORDER BY id, counter; - id | counter -----+--------- + id | counter +--------------------------------------------------------------------- 1 | 8 2 | 19 3 | 18 @@ -705,17 +705,17 @@ SELECT * FROM summary_table ORDER BY id, counter; BEGIN; INSERT INTO modify_table (id) VALUES (10000); WITH test_cte AS (SELECT count(*) FROM modify_table) SELECT * FROM test_cte; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) ROLLBACK; -- similarly, make sure that the intermediate result uses a seperate connection - WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)), + WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)), second_query AS (SELECT * FROM modify_table) SELECT count(*) FROM second_query; - count -------- + count +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/with_nested.out b/src/test/regress/expected/with_nested.out index 6002b27e8..92ec7b271 100644 --- a/src/test/regress/expected/with_nested.out +++ b/src/test/regress/expected/with_nested.out @@ -14,8 +14,8 @@ cte_2 AS ( SELECT * FROM cte_1_1 WHERE user_id < 3 ) SELECT user_id FROM cte_2 LIMIT 1; - user_id ---------- + user_id +--------------------------------------------------------------------- 2 (1 row) @@ -49,8 +49,8 @@ FROM ORDER BY 1, 2 LIMIT 20; - user_id | event_type ----------+------------ + user_id | event_type +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -151,8 +151,8 @@ FROM users_events GROUP BY 1; - uid | avg | sum | sum ------+------------------------+-----+----- + uid | avg | sum | sum +--------------------------------------------------------------------- 1 | 1.00000000000000000000 | 3 | 72 (1 row) @@ -228,28 +228,28 @@ ORDER BY 1, 2, 3, 4, 5, 6 LIMIT 20; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 1 | | | | | - 2 | Wed 
Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | - 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 1 | | | | | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | + 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (20 rows) -- Nested CTEs - joined with local table. Not supported yet. 
@@ -318,28 +318,28 @@ ORDER BY 1, 2, 3, 4, 5, 6 LIMIT 20; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 1 | | | | | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | - 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 1 | | | | | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | + 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (20 rows) -- access to uncle, use window function, apply aggregates, use group by, LIMIT/OFFSET @@ -396,8 +396,8 @@ cte2 AS ( SELECT * FROM cte2_1 ORDER BY 1,2,3,4 LIMIT 3 OFFSET 2 ) SELECT * FROM cte2; - user_id | time | value_1 | min ----------+---------------------------------+---------+----- + user_id | time | value_1 | min +--------------------------------------------------------------------- 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 5 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 5 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 5 diff --git a/src/test/regress/expected/with_partitioning.out b/src/test/regress/expected/with_partitioning.out index 95c415b3c..0f7d40e4e 100644 --- a/src/test/regress/expected/with_partitioning.out +++ b/src/test/regress/expected/with_partitioning.out @@ -16,9 +16,9 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT create_distributed_table('with_partitioning.partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -- Join of a CTE on distributed table and then join with a partitioned table @@ -26,8 +26,8 @@ WITH cte AS ( SELECT * FROM users_table ) SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time ORDER BY 1, 2 LIMIT 3; - id | time -----+--------------------------------- + id | time +--------------------------------------------------------------------- 1 | Thu Nov 23 00:07:11.068353 2017 3 | Wed Nov 22 18:19:49.944985 2017 (2 rows) @@ -37,8 +37,8 @@ WITH cte AS ( SELECT * FROM users_table ) SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time WHERE partitioning_test.time >'2017-11-20' ORDER BY 1, 2 LIMIT 3; - id | time -----+--------------------------------- + id | time +--------------------------------------------------------------------- 1 | Thu Nov 23 00:07:11.068353 2017 3 | Wed Nov 22 18:19:49.944985 2017 (2 rows) @@ -57,8 +57,8 @@ cte_joined_2 AS ( SELECT user_id, cte_joined.time FROM cte_joined join cte on (cte_joined.time = cte.time) ) SELECT DISTINCT ON (event_type) event_type, cte_joined_2.user_id FROM events_table join cte_joined_2 on (cte_joined_2.time=events_table.time::date) ORDER BY 1, 2 LIMIT 10 OFFSET 2; - event_type | user_id -------------+--------- + event_type | user_id +--------------------------------------------------------------------- 2 | 1 3 | 1 4 | 1 @@ -81,8 +81,8 @@ cte_joined_2 AS ( SELECT users_table.user_id, cte_joined.time FROM cte_joined join users_table on (cte_joined.time = users_table.time::date) ) SELECT DISTINCT ON (id) id, cte_joined_2.time FROM cte_joined_2 join partitioning_test on (cte_joined_2.time=partitioning_test.time) ORDER BY 1, 2; - id | time -----+------------ + id | time +--------------------------------------------------------------------- 1 | 11-23-2017 3 | 11-22-2017 (2 rows) diff --git a/src/test/regress/expected/with_prepare.out b/src/test/regress/expected/with_prepare.out index 4a1a55129..9f6d85052 100644 --- a/src/test/regress/expected/with_prepare.out +++ b/src/test/regress/expected/with_prepare.out @@ -4,7 +4,7 @@ WITH basic AS( SELECT * FROM users_table ) SELECT - * + * FROM basic WHERE @@ -47,7 +47,7 @@ user_coolness AS( user_id ) SELECT - * + * FROM user_coolness ORDER BY @@ -58,7 +58,7 @@ PREPARE prepared_test_3(integer) AS WITH users_events AS( -- events 1 and 2 only WITH spec_events AS( - SELECT + SELECT * FROM events_table @@ -111,7 +111,7 @@ user_coolness AS( user_id ) SELECT - * + * FROM user_coolness ORDER BY @@ -123,7 +123,7 @@ WITH basic AS( SELECT * FROM users_table WHERE value_2 IN ($1, $2, $3) ) SELECT - * + * FROM basic ORDER BY @@ -181,104 +181,104 @@ WITH event_id AS ( FROM events_table ) SELECT - count(*) + count(*) FROM event_id WHERE events_user_id IN (SELECT user_id FROM users_table); EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 
| 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 
----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_1; - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 
| 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -288,8 +288,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -299,8 +299,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -310,8 +310,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -321,8 +321,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -332,8 +332,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_2; - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -343,8 +343,8 @@ EXECUTE prepared_test_2; (6 rows) EXECUTE prepared_test_3(1); - user_id | sum ----------+-------- + user_id | sum +--------------------------------------------------------------------- 1 | 10850 6 | 15500 3 | 52700 @@ -354,8 +354,8 @@ EXECUTE prepared_test_3(1); (6 rows) EXECUTE prepared_test_3(2); - user_id | sum ----------+------- + user_id | sum +--------------------------------------------------------------------- 1 | 10850 6 | 15500 5 | 20150 @@ -365,265 +365,265 @@ EXECUTE prepared_test_3(2); (6 rows) EXECUTE prepared_test_3(3); - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(4); - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(5); - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(6); - user_id | sum ----------+----- + user_id | sum +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_4(1, 2, 3); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | - 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | - 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 
| value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | + 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | (10 rows) EXECUTE prepared_test_4(2, 3, 4); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | (10 rows) EXECUTE prepared_test_4(3, 4, 5); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | (10 rows) EXECUTE prepared_test_4(4, 5, 6); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 
23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | - 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | - 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | - 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | + 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | + 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | + 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | (10 rows) EXECUTE prepared_test_4(5, 6, 7); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | - 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | - 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | - 3 | Thu Nov 23 11:31:17.403189 2017 | 4 | 5 | 3 | - 3 | Thu Nov 23 11:41:21.157066 2017 | 3 | 5 | 3 | - 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | - 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 4 | Thu Nov 23 07:28:42.537255 2017 | 3 | 5 | 3 | - 4 | Thu Nov 23 11:45:39.744961 2017 | 4 | 5 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | + 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | + 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | + 3 | Thu Nov 23 11:31:17.403189 2017 | 4 | 5 | 3 | + 3 | Thu Nov 23 11:41:21.157066 2017 | 3 | 5 | 3 | + 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | + 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 4 | Thu Nov 23 07:28:42.537255 2017 | 3 | 5 | 3 | + 4 | Thu Nov 23 11:45:39.744961 2017 | 4 | 5 | 4 | (10 rows) EXECUTE prepared_test_4(6, 7, 8); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_5(1, 2, 3); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + user_id | time | value_1 | value_2 | value_3 | value_4 
+--------------------------------------------------------------------- + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | (10 rows) EXECUTE prepared_test_5(2, 3, 4); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | - 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | - 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | - 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | - 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | - 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | - 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | - 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | + 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | + 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | + 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | + 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | + 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | + 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | (10 rows) EXECUTE prepared_test_5(3, 4, 5); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | - 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | - 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | - 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | - 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | - 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | - 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | - 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | + 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | + 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | + 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | + 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | + 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | + 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | (10 rows) EXECUTE prepared_test_5(4, 5, 6); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 4 | Wed Nov 22 21:33:03.616802 2017 | 5 | 4 | 1 | - 4 | Wed Nov 22 
23:48:11.949567 2017 | 2 | 0 | 0 | - 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | - 4 | Thu Nov 23 00:28:45.060248 2017 | 4 | 4 | 3 | - 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | - 4 | Thu Nov 23 02:14:35.047974 2017 | 4 | 4 | 1 | - 4 | Thu Nov 23 03:34:40.419294 2017 | 1 | 0 | 4 | - 4 | Thu Nov 23 05:42:12.89386 2017 | 2 | 3 | 3 | - 4 | Thu Nov 23 06:39:06.287818 2017 | 3 | 3 | 2 | - 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 4 | Wed Nov 22 21:33:03.616802 2017 | 5 | 4 | 1 | + 4 | Wed Nov 22 23:48:11.949567 2017 | 2 | 0 | 0 | + 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | + 4 | Thu Nov 23 00:28:45.060248 2017 | 4 | 4 | 3 | + 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | + 4 | Thu Nov 23 02:14:35.047974 2017 | 4 | 4 | 1 | + 4 | Thu Nov 23 03:34:40.419294 2017 | 1 | 0 | 4 | + 4 | Thu Nov 23 05:42:12.89386 2017 | 2 | 3 | 3 | + 4 | Thu Nov 23 06:39:06.287818 2017 | 3 | 3 | 2 | + 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 5 | (10 rows) EXECUTE prepared_test_5(5, 6, 7); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | - 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | - 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | - 5 | Wed Nov 22 22:31:47.62577 2017 | 3 | 1 | 4 | - 5 | Wed Nov 22 23:10:42.777699 2017 | 3 | 4 | 5 | - 5 | Thu Nov 23 00:46:13.498577 2017 | 3 | 2 | 2 | - 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | - 5 | Thu Nov 23 02:09:42.27857 2017 | 3 | 2 | 4 | - 5 | Thu Nov 23 02:50:32.678074 2017 | 4 | 2 | 4 | - 5 | Thu Nov 23 06:35:05.166535 2017 | 5 | 5 | 1 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | + 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | + 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | + 5 | Wed Nov 22 22:31:47.62577 2017 | 3 | 1 | 4 | + 5 | Wed Nov 22 23:10:42.777699 2017 | 3 | 4 | 5 | + 5 | Thu Nov 23 00:46:13.498577 2017 | 3 | 2 | 2 | + 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | + 5 | Thu Nov 23 02:09:42.27857 2017 | 3 | 2 | 4 | + 5 | Thu Nov 23 02:50:32.678074 2017 | 4 | 2 | 4 | + 5 | Thu Nov 23 06:35:05.166535 2017 | 5 | 5 | 1 | (10 rows) EXECUTE prepared_test_5(6, 7, 8); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- - 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 1 | - 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 4 | - 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | - 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | - 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | - 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | - 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | - 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 1 | + 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 4 | + 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | + 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 
01:14:55.769581 2017 | 0 | 0 | 5 | + 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | + 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | + 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | + 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | (10 rows) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_partition_column_insert(1); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 1 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 1 | | | | | (1 row) EXECUTE prepared_partition_column_insert(2); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 2 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 2 | | | | | (1 row) EXECUTE prepared_partition_column_insert(3); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 3 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 3 | | | | | (1 row) EXECUTE prepared_partition_column_insert(4); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 4 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 4 | | | | | (1 row) EXECUTE prepared_partition_column_insert(5); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 5 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 5 | | | | | (1 row) EXECUTE prepared_partition_column_insert(6); - user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- - 6 | | | | | + user_id | time | value_1 | value_2 | value_3 | value_4 +--------------------------------------------------------------------- + 6 | | | | | (1 row) DEALLOCATE ALL; diff --git a/src/test/regress/expected/with_set_operations.out b/src/test/regress/expected/with_set_operations.out index bb82c67c3..d685c4246 100644 --- a/src/test/regress/expected/with_set_operations.out +++ b/src/test/regress/expected/with_set_operations.out @@ -3,16 +3,16 @@ -- =================================================================== SET client_min_messages TO DEBUG1; -- use ctes inside unions on the top level -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM 
cte_1) UNION (SELECT * FROM cte_2) ORDER BY 1 DESC; -DEBUG: generating subplan 1_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 1_2 for CTE cte_2: SELECT user_id FROM public.events_table -DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 ORDER BY 1 DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -22,35 +22,35 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT cte_1.user_id F (6 rows) -- use ctes inside unions in a subquery -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) -SELECT - count(*) +SELECT + count(*) FROM ( (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) ) as foo; -DEBUG: generating subplan 4_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 4_2 for CTE cte_2: SELECT user_id FROM public.events_table -DEBUG: generating subplan 4_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 -DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo + count +--------------------------------------------------------------------- 6 (1 row) -- cte with unions of other ctes -WITH +WITH cte_1 AS (SELECT 
user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table), cte_3 AS ((SELECT * FROM cte_1) UNION (SELECT * FROM cte_2)) SELECT * FROM cte_3 ORDER BY 1 DESC; -DEBUG: generating subplan 8_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 8_2 for CTE cte_2: SELECT user_id FROM public.events_table -DEBUG: generating subplan 8_3 for CTE cte_3: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 -DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_3 ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_3 ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -60,16 +60,16 @@ DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id FROM (S (6 rows) -- cte with unions of distributed table -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM users_table)) SELECT * FROM cte_1 ORDER BY 1 DESC; -DEBUG: generating subplan 12_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table -DEBUG: generating subplan 13_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing 
subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -79,17 +79,17 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT user_id FROM ( (6 rows) -- cte with unions of tables is intersected with another query -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM users_table)) (SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1 DESC; -DEBUG: generating subplan 16_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table -DEBUG: generating subplan 17_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 17_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: generating subplan 16_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -99,75 +99,75 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: 
SELECT cte_1.user_id (6 rows) -- cte with unions of tables is intersected with another query that involves except -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM users_table)) -(SELECT * FROM cte_1) - INTERSECT +(SELECT * FROM cte_1) + INTERSECT ((SELECT user_id FROM events_table WHERE user_id < 3) EXCEPT (SELECT user_id FROM users_table WHERE user_id > 4)) ORDER BY 1 DESC; -DEBUG: generating subplan 21_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table -DEBUG: generating subplan 22_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 22_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('22_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: generating subplan 21_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) -DEBUG: generating subplan 21_3 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.>) 4) -DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT (SELECT intermediate_result.user_id FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) ORDER BY 1 DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) +DEBUG: generating subplan XXX_3 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.>) 4) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 2 1 (2 rows) -- CTE inside a top level 
EXCEPT -(WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3) SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1; -DEBUG: generating subplan 27_1 for CTE cte_1: SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) -DEBUG: generating subplan 27_2 for subquery SELECT user_id FROM public.users_table -DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 - user_id ---------- +(WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3) SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1; +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 + user_id +--------------------------------------------------------------------- 1 2 (2 rows) -- INTERSECT inside a CTE, which is inside a subquery -SELECT - DISTINCT users_table.user_id -FROM - users_table, - (WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3 INTERSECT - SELECT user_id FROM events_table WHERE user_id < 2) +SELECT + DISTINCT users_table.user_id +FROM + users_table, + (WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3 INTERSECT + SELECT user_id FROM events_table WHERE user_id < 2) SELECT * FROM cte_1) as foo WHERE users_table.user_id = foo.user_id ORDER BY 1 DESC; -DEBUG: generating subplan 30_1 for CTE cte_1: SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 3) INTERSECT SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 2) -DEBUG: generating subplan 31_1 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) -DEBUG: generating subplan 31_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 2) -DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1) foo WHERE (users_table.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY users_table.user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT 
events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 3) INTERSECT SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 2) +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1) foo WHERE (users_table.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY users_table.user_id DESC + user_id +--------------------------------------------------------------------- 1 (1 row) -- UNION is created via outputs of CTEs, which is inside a subquery -- and the subquery is joined with a distributed table -SELECT - count(*) -FROM +SELECT + count(*) +FROM events_table, ( - WITH + WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) ) as foo WHERE foo.user_id = events_table.event_type; -DEBUG: generating subplan 34_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 34_2 for CTE cte_2: SELECT user_id FROM public.events_table -DEBUG: generating subplan 34_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 -DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('34_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.event_type) - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) 
events_table.event_type) + count +--------------------------------------------------------------------- 95 (1 row) @@ -176,11 +176,11 @@ DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS co INTERSECT (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id ) ORDER BY 1 DESC; -DEBUG: generating subplan 38_1 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) -DEBUG: generating subplan 38_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) -DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -195,12 +195,12 @@ INTERSECT (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10) ORDER BY 1 DESC; DEBUG: push down of limit count: 10 -DEBUG: generating subplan 41_1 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 DEBUG: push down of limit count: 10 -DEBUG: generating subplan 41_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 -DEBUG: Plan 41 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('41_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('41_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id ---------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 +DEBUG: Plan XXX query after replacing subqueries 
and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -210,59 +210,59 @@ DEBUG: Plan 41 query after replacing subqueries and CTEs: SELECT intermediate_r (6 rows) -- joins inside unions that are not safe to pushdown inside a subquery -SELECT - count(*) -FROM +SELECT + count(*) +FROM (SELECT DISTINCT value_2 FROM events_table) as events_table, - (WITH foo AS + (WITH foo AS ((SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id ) INTERSECT - (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) - SELECT * FROM foo) + (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) + SELECT * FROM foo) as foo -WHERE +WHERE foo.user_id = events_table.value_2; -DEBUG: generating subplan 44_1 for subquery SELECT DISTINCT value_2 FROM public.events_table -DEBUG: generating subplan 44_2 for CTE foo: SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) INTERSECT (SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10) +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT value_2 FROM public.events_table +DEBUG: generating subplan XXX_2 for CTE foo: SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) INTERSECT (SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10) DEBUG: push down of limit count: 10 -DEBUG: generating subplan 46_1 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 -DEBUG: generating subplan 46_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) -DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('46_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) events_table, (SELECT foo_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('44_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo_1) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) - count -------- +DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT 
events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) events_table, (SELECT foo_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo_1) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) + count +--------------------------------------------------------------------- 5 (1 row) -- joins inside unions some safe to pushdown -SELECT - count(*) -FROM +SELECT + count(*) +FROM (WITH events_table AS (SELECT DISTINCT user_id FROM events_table) SELECT * FROM events_table) as events_table, ((SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id ) INTERSECT (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) as foo -WHERE +WHERE foo.user_id = events_table.user_id; -DEBUG: generating subplan 49_1 for CTE events_table: SELECT DISTINCT user_id FROM public.events_table +DEBUG: generating subplan XXX_1 for CTE events_table: SELECT DISTINCT user_id FROM public.events_table DEBUG: push down of limit count: 10 -DEBUG: generating subplan 49_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 -DEBUG: generating subplan 49_3 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) -DEBUG: generating subplan 49_4 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('49_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('49_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 49 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT events_table_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('49_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) events_table_1) events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('49_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.user_id) - count -------- +DEBUG: generating subplan XXX_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) 
LIMIT 10 +DEBUG: generating subplan XXX_3 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) +DEBUG: generating subplan XXX_4 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT events_table_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) events_table_1) events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.user_id) + count +--------------------------------------------------------------------- 6 (1 row) -- CTE inside unions -(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION -(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC; -DEBUG: generating subplan 54_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 54_2 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY 1 DESC - user_id ---------- +(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION +(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC; +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY 1 DESC + user_id +--------------------------------------------------------------------- 6 5 4 @@ -276,29 +276,29 @@ SELECT count(*) FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table WHERE users_table.value_2 = foo.user_id; -DEBUG: generating subplan 57_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 57_2 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 57_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 -DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE (users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE (users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) + count +--------------------------------------------------------------------- 92 (1 row) -- CTEs with less alias than the input subquery -(WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) UNION +(WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC, 2 DESC LIMIT 5; -DEBUG: generating subplan 61_1 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table -DEBUG: generating subplan 61_2 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table -DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 UNION SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 ORDER BY 1 DESC, 2 DESC LIMIT 5 - x | value_2 ----+--------- - 6 | +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 UNION SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 ORDER BY 1 DESC, 2 DESC LIMIT 5 + x | value_2 
+--------------------------------------------------------------------- + 6 | 6 | 4 6 | 3 6 | 2 @@ -306,7 +306,7 @@ DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1 (5 rows) -- simple subqueries in WHERE with unions -SELECT +SELECT count(*) FROM users_table @@ -321,64 +321,64 @@ WHERE ) SELECT DISTINCT user_id FROM cte_1 ) ORDER BY 1 DESC; -DEBUG: generating subplan 64_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT events_table.user_id FROM public.events_table -DEBUG: generating subplan 65_1 for subquery SELECT user_id FROM public.users_table -DEBUG: generating subplan 65_2 for subquery SELECT user_id FROM public.events_table -DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('65_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) -DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1)) ORDER BY (count(*)) DESC - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT events_table.user_id FROM public.events_table +DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1)) ORDER BY (count(*)) DESC + count +--------------------------------------------------------------------- 92 (1 row) -- simple subqueries in WHERE with unions and ctes -SELECT +SELECT count(*) FROM users_table WHERE value_2 IN ( - WITH + WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) ) ORDER BY 1 DESC; -DEBUG: generating subplan 68_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 68_2 for CTE cte_2: SELECT user_id FROM public.events_table -DEBUG: generating subplan 68_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 -DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table 
WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('68_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY (count(*)) DESC - count -------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT user_id FROM public.events_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY (count(*)) DESC + count +--------------------------------------------------------------------- 92 (1 row) -- unions and ctes inside subqueries in where clause with a pushdownable correlated subquery -SELECT - DISTINCT user_id -FROM - events_table -WHERE - event_type IN +SELECT + DISTINCT user_id +FROM + events_table +WHERE + event_type IN ( SELECT users_table.user_id FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table WHERE users_table.value_2 = foo.user_id AND events_table.user_id = users_table.user_id ) ORDER BY 1 DESC; -DEBUG: generating subplan 72_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 72_2 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 72_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 -DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) ORDER BY user_id DESC - user_id ---------- +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id 
integer)) cte_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) ORDER BY user_id DESC + user_id +--------------------------------------------------------------------- 5 4 3 @@ -388,18 +388,18 @@ DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT DISTINCT user_ -- unions and ctes inside subqueries in where clause with a not pushdownable correlated subquery -- should error out -SELECT - DISTINCT user_id -FROM - events_table -WHERE - event_type IN +SELECT + DISTINCT user_id +FROM + events_table +WHERE + event_type IN ( SELECT users_table.user_id FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table @@ -407,10 +407,10 @@ WHERE LIMIT 5 ) ORDER BY 1 DESC; -DEBUG: generating subplan 76_1 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 76_2 for CTE cte_1: SELECT user_id FROM public.users_table -DEBUG: generating subplan 76_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('76_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 -DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('76_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)) LIMIT 5)) ORDER BY user_id DESC +DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_2 for CTE cte_1: SELECT user_id FROM public.users_table +DEBUG: generating subplan XXX_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)) LIMIT 5)) ORDER BY user_id DESC ERROR: cannot 
push down this subquery DETAIL: Limit in subquery is currently unsupported when a subquery references a column from another query SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/with_transactions.out b/src/test/regress/expected/with_transactions.out index 95cc90eaf..4e8f0b4f1 100644 --- a/src/test/regress/expected/with_transactions.out +++ b/src/test/regress/expected/with_transactions.out @@ -5,16 +5,16 @@ SET citus.shard_replication_factor TO 1; -- https://github.com/citusdata/citus/i SET citus.next_placement_id TO 800000; CREATE TABLE with_transactions.raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('raw_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) CREATE TABLE with_transactions.second_raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('second_raw_table', 'tenant_id'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO @@ -36,28 +36,28 @@ BEGIN; DELETE FROM raw_table WHERE created_at < '2014-02-10 20:00:00' AND tenant_id IN (SELECT * from ids_to_delete) RETURNING tenant_id ) UPDATE raw_table SET income = income * 2 WHERE tenant_id IN (SELECT tenant_id FROM deleted_ids); -DEBUG: generating subplan 3_1 for CTE ids_to_delete: SELECT tenant_id FROM with_transactions.raw_table WHERE (income OPERATOR(pg_catalog.<) (250)::double precision) -DEBUG: generating subplan 3_2 for CTE deleted_ids: DELETE FROM with_transactions.raw_table WHERE ((created_at OPERATOR(pg_catalog.<) 'Mon Feb 10 20:00:00 2014 PST'::timestamp with time zone) AND (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_to_delete))) RETURNING tenant_id -DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET income = (income OPERATOR(pg_catalog.*) (2)::double precision) WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT deleted_ids.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) deleted_ids)) +DEBUG: generating subplan XXX_1 for CTE ids_to_delete: SELECT tenant_id FROM with_transactions.raw_table WHERE (income OPERATOR(pg_catalog.<) (250)::double precision) +DEBUG: generating subplan XXX_2 for CTE deleted_ids: DELETE FROM with_transactions.raw_table WHERE ((created_at OPERATOR(pg_catalog.<) 'Mon Feb 10 20:00:00 2014 PST'::timestamp with time zone) AND (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_to_delete))) RETURNING tenant_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET income = (income OPERATOR(pg_catalog.*) (2)::double precision) WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT deleted_ids.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) deleted_ids)) ROLLBACK; 
-- see that both UPDATE and DELETE commands are rollbacked SELECT count(*) FROM raw_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) SELECT max(income) FROM raw_table; - max ------- + max +--------------------------------------------------------------------- 1000 (1 row) -- multi-statement multi shard modifying statements should work BEGIN; SELECT count (*) FROM second_raw_table; - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -70,9 +70,9 @@ BEGIN; ) UPDATE raw_table SET created_at = '2001-02-10 20:00:00' WHERE tenant_id IN (SELECT tenant_id FROM ids_inserted) AND tenant_id < (SELECT count FROM distinct_count); -DEBUG: generating subplan 9_1 for CTE distinct_count: SELECT count(DISTINCT created_at) AS count FROM with_transactions.raw_table -DEBUG: generating subplan 9_2 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11, 1000, now()) RETURNING tenant_id -DEBUG: Plan 9 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE ((tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) AND (tenant_id OPERATOR(pg_catalog.<) (SELECT distinct_count.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) distinct_count))) +DEBUG: generating subplan XXX_1 for CTE distinct_count: SELECT count(DISTINCT created_at) AS count FROM with_transactions.raw_table +DEBUG: generating subplan XXX_2 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11, 1000, now()) RETURNING tenant_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE ((tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) AND (tenant_id OPERATOR(pg_catalog.<) (SELECT distinct_count.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) distinct_count))) TRUNCATE second_raw_table; COMMIT; -- sequential insert followed by parallel update works just fine @@ -81,24 +81,24 @@ WITH ids_inserted AS INSERT INTO raw_table VALUES (11, 1000, now()), (12, 1000, now()), (13, 1000, now()) RETURNING tenant_id ) UPDATE raw_table SET created_at = '2001-02-10 20:00:00' WHERE tenant_id IN (SELECT tenant_id FROM ids_inserted); -DEBUG: generating subplan 12_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11,1000,now()), (12,1000,now()), (13,1000,now()) RETURNING raw_table.tenant_id -DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 
'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) +DEBUG: generating subplan XXX_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11,1000,now()), (12,1000,now()), (13,1000,now()) RETURNING raw_table.tenant_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) -- make sure that everything committed SELECT count(*) FROM raw_table; - count -------- + count +--------------------------------------------------------------------- 105 (1 row) SELECT count(*) FROM raw_table WHERE created_at = '2001-02-10 20:00:00'; - count -------- + count +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM second_raw_table; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) @@ -110,11 +110,11 @@ WITH ids_inserted AS INSERT INTO raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING tenant_id ) SELECT income FROM second_raw_table WHERE tenant_id IN (SELECT * FROM ids_inserted) ORDER BY 1 DESC LIMIT 3; -DEBUG: generating subplan 17_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING raw_table.tenant_id -DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT income FROM with_transactions.second_raw_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) ORDER BY income DESC LIMIT 3 +DEBUG: generating subplan XXX_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING raw_table.tenant_id +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT income FROM with_transactions.second_raw_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) ORDER BY income DESC LIMIT 3 DEBUG: push down of limit count: 3 - income --------- + income +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -135,8 +135,8 @@ BEGIN END; $BODY$; SELECT count(*) FROM (SELECT run_ctes(s) FROM generate_series(1,current_setting('max_connections')::int+2) s) a; - count -------- + count +--------------------------------------------------------------------- 102 (1 row) diff --git a/src/test/regress/expected/with_where.out b/src/test/regress/expected/with_where.out index 4e0704f86..032b7444b 100644 --- a/src/test/regress/expected/with_where.out +++ b/src/test/regress/expected/with_where.out @@ -2,15 +2,15 @@ SET citus.enable_repartition_joins TO on; -- CTE in WHERE basic WITH events AS ( - SELECT - event_type - FROM - events_table + SELECT + event_type + FROM + events_table WHERE - user_id < 5 + user_id < 5 GROUP BY event_type - ORDER BY + ORDER BY event_type LIMIT 10 ) @@ -25,8 +25,8 @@ IN event_type FROM events); - count -------- + 
count +--------------------------------------------------------------------- 101 (1 row) @@ -37,12 +37,12 @@ WITH users AS ( events_table, users_table WHERE events_table.user_id = users_table.user_id - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -55,8 +55,8 @@ WHERE FROM users ); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -67,12 +67,12 @@ WITH users AS ( events_table, users_table WHERE events_table.user_id = users_table.user_id - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -85,8 +85,8 @@ WHERE FROM users ); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -98,12 +98,12 @@ WITH users AS ( events_table, users_table WHERE events_table.value_2 = users_table.value_2 - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -116,8 +116,8 @@ WHERE FROM users ); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -134,19 +134,19 @@ WHERE event_type IN (WITH events AS ( - SELECT - event_type - FROM - events_table - WHERE user_id < 5 - GROUP BY - 1 - ORDER BY + SELECT + event_type + FROM + events_table + WHERE user_id < 5 + GROUP BY + 1 + ORDER BY 1) SELECT * FROM events LIMIT 10 ); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) @@ -168,15 +168,15 @@ WHERE events_table, users_table WHERE events_table.value_2 = users_table.value_2 - GROUP BY + GROUP BY 1 ORDER BY 1 ) SELECT * FROM users LIMIT 10 ); - count -------- + count +--------------------------------------------------------------------- 101 (1 row) diff --git a/src/test/regress/expected/worker_binary_data_partition.out b/src/test/regress/expected/worker_binary_data_partition.out index b07d7d66e..9e031cdae 100644 --- a/src/test/regress/expected/worker_binary_data_partition.out +++ b/src/test/regress/expected/worker_binary_data_partition.out @@ -21,8 +21,8 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT length(binarycolumn) FROM :Table_Name; - length --------- + length +--------------------------------------------------------------------- 2 4 3 @@ -43,9 +43,9 @@ SELECT length(binarycolumn) FROM :Table_Name; SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); - worker_range_partition_table ------------------------------- - + worker_range_partition_table +--------------------------------------------------------------------- + (1 row) -- Copy range partitioned files into tables @@ -60,8 +60,8 @@ SELECT COUNT(*) AS total_row_count FROM ( SELECT * FROM :Table_Part_00 UNION ALL SELECT * FROM :Table_Part_01 UNION ALL SELECT * FROM :Table_Part_02 ) AS all_rows; - total_row_count ------------------ + total_row_count +--------------------------------------------------------------------- 14 (1 row) @@ -71,8 +71,8 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -80,16 +80,16 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL 
:Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -97,8 +97,8 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -106,16 +106,16 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_check_invalid_arguments.out b/src/test/regress/expected/worker_check_invalid_arguments.out index 81c7dce62..3fa2d2335 100644 --- a/src/test/regress/expected/worker_check_invalid_arguments.out +++ b/src/test/regress/expected/worker_check_invalid_arguments.out @@ -20,8 +20,8 @@ SET citus.next_shard_id TO 1100000; CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT COUNT(*) FROM :Table_Name; - count -------- + count +--------------------------------------------------------------------- 2 (1 row) @@ -30,8 +30,6 @@ SELECT worker_range_partition_table(:JobId, :TaskId, :Bad_Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); ERROR: relation "bad_table_name" does not exist -LINE 1: SELECT * FROM bad_table_name - ^ QUERY: SELECT * FROM bad_table_name -- Check that we fail with bad partition column name SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, @@ -52,9 +50,9 @@ ERROR: partition column types 25 and 20 do not match SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); - worker_range_partition_table ------------------------------- - + worker_range_partition_table +--------------------------------------------------------------------- + (1 row) -- Check that we fail to merge when the number of column names and column types @@ -92,9 +90,9 @@ ERROR: could not open directory "base/pgsql_job_cache/job_201010/task_429496729 SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea']); - worker_merge_files_into_table -------------------------------- - + worker_merge_files_into_table +--------------------------------------------------------------------- + (1 row) -- worker_execute_sql_task should only accept queries diff --git a/src/test/regress/expected/worker_hash_partition.out b/src/test/regress/expected/worker_hash_partition.out index c21c943d4..08c6d468f 100644 --- 
a/src/test/regress/expected/worker_hash_partition.out +++ b/src/test/regress/expected/worker_hash_partition.out @@ -26,9 +26,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type::regtype, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table ------------------------------ - + worker_hash_partition_table +--------------------------------------------------------------------- + (1 row) COPY :Table_Part_00 FROM :'Table_File_00'; @@ -36,26 +36,26 @@ COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 2885 (1 row) SELECT COUNT(*) FROM :Table_Part_01; - count -------- + count +--------------------------------------------------------------------- 3009 (1 row) SELECT COUNT(*) FROM :Table_Part_02; - count -------- + count +--------------------------------------------------------------------- 3104 (1 row) SELECT COUNT(*) FROM :Table_Part_03; - count -------- + count +--------------------------------------------------------------------- 3002 (1 row) @@ -64,64 +64,64 @@ SELECT COUNT(*) FROM :Table_Part_03; SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) ) diff; - diff_lhs_03 -------------- + diff_lhs_03 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; - diff_rhs_03 -------------- + diff_rhs_03 +--------------------------------------------------------------------- 0 (1 
row) diff --git a/src/test/regress/expected/worker_hash_partition_complex.out b/src/test/regress/expected/worker_hash_partition_complex.out index 4931d0032..14c879bdf 100644 --- a/src/test/regress/expected/worker_hash_partition_complex.out +++ b/src/test/regress/expected/worker_hash_partition_complex.out @@ -30,9 +30,9 @@ SELECT worker_hash_partition_table(:JobId, :TaskId, ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table ------------------------------ - + worker_hash_partition_table +--------------------------------------------------------------------- + (1 row) -- Copy partitioned data files into tables for testing purposes @@ -41,14 +41,14 @@ COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 1883 (1 row) SELECT COUNT(*) FROM :Table_Part_03; - count -------- + count +--------------------------------------------------------------------- 1913 (1 row) @@ -58,8 +58,8 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -67,8 +67,8 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM :Table_Part_01 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -76,8 +76,8 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM :Table_Part_02 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -85,8 +85,8 @@ SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) ) diff; - diff_lhs_03 -------------- + diff_lhs_03 +--------------------------------------------------------------------- 0 (1 row) @@ -94,8 +94,8 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -103,8 +103,8 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -112,8 +112,8 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -121,8 +121,8 @@ SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 
3) EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; - diff_rhs_03 -------------- + diff_rhs_03 +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_merge_hash_files.out b/src/test/regress/expected/worker_merge_hash_files.out index 89246b89f..74d4a9016 100644 --- a/src/test/regress/expected/worker_merge_hash_files.out +++ b/src/test/regress/expected/worker_merge_hash_files.out @@ -15,36 +15,36 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); - worker_merge_files_into_table -------------------------------- - + worker_merge_files_into_table +--------------------------------------------------------------------- + (1 row) -- We first count elements from the merged table and the original table we hash -- partitioned. We then compute the difference of these two tables. SELECT COUNT(*) FROM :Task_Table_Name; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; - diff_lhs ----------- + diff_lhs +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; - diff_rhs ----------- + diff_rhs +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_merge_range_files.out b/src/test/regress/expected/worker_merge_range_files.out index b39f52731..54907fe4a 100644 --- a/src/test/regress/expected/worker_merge_range_files.out +++ b/src/test/regress/expected/worker_merge_range_files.out @@ -15,36 +15,36 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); - worker_merge_files_into_table -------------------------------- - + worker_merge_files_into_table +--------------------------------------------------------------------- + (1 row) -- We first count elements from the merged table and the original table we range -- partitioned. We then compute the difference of these two tables. 
SELECT COUNT(*) FROM :Task_Table_Name; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; - count -------- + count +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; - diff_lhs ----------- + diff_lhs +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; - diff_rhs ----------- + diff_rhs +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_null_data_partition.out b/src/test/regress/expected/worker_null_data_partition.out index 08b3e0976..0aa065320 100644 --- a/src/test/regress/expected/worker_null_data_partition.out +++ b/src/test/regress/expected/worker_null_data_partition.out @@ -23,9 +23,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset SELECT worker_range_partition_table(:JobId, :Range_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[0, 10]::_int4); - worker_range_partition_table ------------------------------- - + worker_range_partition_table +--------------------------------------------------------------------- + (1 row) -- Copy partitioned data files into tables for testing purposes @@ -33,14 +33,14 @@ COPY :Range_Table_Part_00 FROM :'Range_Table_File_00'; COPY :Range_Table_Part_01 FROM :'Range_Table_File_01'; COPY :Range_Table_Part_02 FROM :'Range_Table_File_02'; SELECT COUNT(*) FROM :Range_Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 6 (1 row) SELECT COUNT(*) FROM :Range_Table_Part_02; - count -------- + count +--------------------------------------------------------------------- 588 (1 row) @@ -50,8 +50,8 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Range_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -59,16 +59,16 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Range_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -76,8 +76,8 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Range_Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -85,16 +85,16 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 
0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -116,23 +116,23 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( SELECT worker_hash_partition_table(:JobId, :Hash_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table ------------------------------ - + worker_hash_partition_table +--------------------------------------------------------------------- + (1 row) COPY :Hash_Table_Part_00 FROM :'Hash_Table_File_00'; COPY :Hash_Table_Part_01 FROM :'Hash_Table_File_01'; COPY :Hash_Table_Part_02 FROM :'Hash_Table_File_02'; SELECT COUNT(*) FROM :Hash_Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 282 (1 row) SELECT COUNT(*) FROM :Hash_Table_Part_02; - count -------- + count +--------------------------------------------------------------------- 102 (1 row) @@ -142,24 +142,24 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Hash_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Hash_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Hash_Table_Part_02 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -167,24 +167,24 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Hash_Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Hash_Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Hash_Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_range_partition.out b/src/test/regress/expected/worker_range_partition.out index bb4041ac3..2e925471b 100644 --- a/src/test/regress/expected/worker_range_partition.out +++ b/src/test/regress/expected/worker_range_partition.out @@ -22,9 +22,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[1, 3000, 12000]::_int8); - worker_range_partition_table ------------------------------- - + worker_range_partition_table 
+--------------------------------------------------------------------- + (1 row) COPY :Table_Part_00 FROM :'Table_File_00'; @@ -32,14 +32,14 @@ COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM :Table_Part_03; - count -------- + count +--------------------------------------------------------------------- 3047 (1 row) @@ -48,8 +48,8 @@ SELECT COUNT(*) FROM :Table_Part_03; SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column < 1 ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -57,8 +57,8 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -66,24 +66,24 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 12000 ) diff; - diff_lhs_03 -------------- + diff_lhs_03 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE :Partition_Column < 1 EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -91,8 +91,8 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -100,16 +100,16 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 12000 EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; - diff_rhs_03 -------------- + diff_rhs_03 +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_range_partition_complex.out b/src/test/regress/expected/worker_range_partition_complex.out index ff739a3c1..63ec9b376 100644 --- a/src/test/regress/expected/worker_range_partition_complex.out +++ b/src/test/regress/expected/worker_range_partition_complex.out @@ -27,9 +27,9 @@ SELECT worker_range_partition_table(:JobId, :TaskId, ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, ARRAY[101, 12000, 18000]::_int4); - worker_range_partition_table ------------------------------- - + 
worker_range_partition_table +--------------------------------------------------------------------- + (1 row) -- Copy partitioned data files into tables for testing purposes @@ -38,14 +38,14 @@ COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; - count -------- + count +--------------------------------------------------------------------- 3 (1 row) SELECT COUNT(*) FROM :Table_Part_03; - count -------- + count +--------------------------------------------------------------------- 7022 (1 row) @@ -55,8 +55,8 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 ) diff; - diff_lhs_00 -------------- + diff_lhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -65,8 +65,8 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 101 AND :Partition_Column < 12000 ) diff; - diff_lhs_01 -------------- + diff_lhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -75,8 +75,8 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 12000 AND :Partition_Column < 18000 ) diff; - diff_lhs_02 -------------- + diff_lhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -84,8 +84,8 @@ SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 ) diff; - diff_lhs_03 -------------- + diff_lhs_03 +--------------------------------------------------------------------- 0 (1 row) @@ -93,8 +93,8 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; - diff_rhs_00 -------------- + diff_rhs_00 +--------------------------------------------------------------------- 0 (1 row) @@ -103,8 +103,8 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Partition_Column >= 101 AND :Partition_Column < 12000 EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; - diff_rhs_01 -------------- + diff_rhs_01 +--------------------------------------------------------------------- 0 (1 row) @@ -113,8 +113,8 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :Partition_Column >= 12000 AND :Partition_Column < 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; - diff_rhs_02 -------------- + diff_rhs_02 +--------------------------------------------------------------------- 0 (1 row) @@ -122,8 +122,8 @@ SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; - diff_rhs_03 -------------- + diff_rhs_03 +--------------------------------------------------------------------- 0 (1 row)